# setup.py for pnxenopoulos/soccer-data-gen
from setuptools import setup, find_packages

setup(
    name="soccergen",
    version="0.1",
    packages=find_packages(),
    # Dependencies that will be installed or upgraded on the target machine
    install_requires=["gfootball>=2.8"],
    # metadata to display on PyPI
    author="<NAME>",
    author_email="<EMAIL>",
    description="Soccer trajectory and event data generation",
    keywords="soccer data-generation football",
    url="https://github.com/pnxenopoulos/soccer-data-gen",  # project home page, if any
    project_urls={
        "Issues": "https://github.com/pnxenopoulos/soccer-data-gen/issues",
        "Documentation": "https://github.com/pnxenopoulos/soccer-data-gen/csgo/",
        "Github": "https://github.com/pnxenopoulos/soccer-data-gen/csgo/",
    },
    classifiers=["License :: OSI Approved :: MIT License"],
)
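# Quick sanity check (a sketch, not part of the original setup.py): running
# find_packages() from the repository root previews which packages the
# distribution above would include.
from setuptools import find_packages

if __name__ == "__main__":
    # Expected to print something like ['soccergen'] when run from the repo root.
    print(find_packages())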
[ "sm.engine.formula_parser import format_modifiers FDR_CONFIG = {'decoy_sample_size': 2} @patch('sm.engine.annotation.fdr.DECOY_ADDUCTS', ['+He', '+Li'])", "= [(formula, adduct) for formula, adduct in product(formulas, target_adducts)] assert", "decoy_sample_size) assert min_count < len(ions) <= max_count target_ions = list(product(formulas,", "'+K', '+Li'), ('H2O', '', '+He'), ('H2O', '', '+Li'), ], columns=['formula',", "formulas = ['H2O', 'C5H2OH'] target_adducts = ['+H', '+Na'] decoy_sample_size =", "target_adducts = ['+H', '+Na', '[M]+'] target_modifiers = [ format_modifiers(cm, nl,", "['C2', '+H', 0.75], ['C3', '+H', 0.5], ['C4', '+H', 0.25], ['C1',", "run_fdr_ranking(target_scores, decoy_scores, 1, True, False) fdr_mono = run_fdr_ranking(target_scores, decoy_scores, 1,", "list(product(formulas, target_modifiers)) assert set(target_ions).issubset(set(map(tuple, ions))) def test_run_fdr_ranking(): target_scores = pd.Series([1.0,", "['C4', '+H', 0.25], ['C1', '+Cu', 0.75], ['C2', '+Ag', 0.3], ['C3',", "pd.Series([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1,", "0.1]) n_targets = pd.Series([1, 2, 3, 4, 5, 6, 7,", "np import pandas as pd from pandas.util.testing import assert_frame_equal from", "'+H', 0.5], ], columns=['formula', 'modifier', 'msm'], ).assign(fdr=expected_fdrs) assert_frame_equal(fdr.estimate_fdr(msm_df, None), exp_sf_df)", "FDR( fdr_config=fdr_config, chem_mods=[], neutral_losses=[], target_adducts=['+H'], analysis_version=1, ) fdr.fdr_levels = [0.4,", "type(ions) == list # total number varies because different (formula,", "neutral_losses = ['-O', '-C'] target_adducts = ['+H', '+Na', '[M]+'] target_modifiers", "chem_mods=[], neutral_losses=[], target_adducts=['+H'], analysis_version=1, ) fdr.fdr_levels = [0.4, 0.8] fdr.td_df", "assert min_count < len(ions) <= max_count target_ions = list(product(formulas, target_modifiers))", "/ n_targets expected_fdr_ros = (n_decoys + 1) / (n_targets +", "target_ions = list(product(formulas, target_modifiers)) assert set(target_ions).issubset(set(map(tuple, ions))) def test_run_fdr_ranking(): target_scores", "* (1 + decoy_sample_size) assert min_count < len(ions) <= max_count", "2 / 8, 2 / 8, 3 / 9, 4", "len(formulas) * len(target_adducts) ) target_ions = [(formula, adduct) for formula,", "analysis_version=analysis_version, ) fdr.fdr_levels = [0.2, 0.8] fdr.td_df = pd.DataFrame( [['H2O',", "['H2O', '+H', '+Co'], ['C2H2', '+H', '+Ag'], ['C2H2', '+H', '+Ar']], columns=['formula',", "len(target_adducts) < len(ions) <= len(formulas) * len(target_adducts) * decoy_sample_size +", "= pd.DataFrame( [ ['C1', '+H', 1.0], ['C2', '+H', 0.75], ['C3',", "0.5, 0.4, 0.3, 0.2, 0.1, 0.0]) decoy_scores = pd.Series([0.8, 0.55,", "['C3', '+Cl', 0.25], ['C4', '+Co', 0.1], ], columns=['formula', 'modifier', 'msm'],", "[['C1', '+H', '+Cu'], ['C2', '+H', '+Ag'], ['C3', '+H', '+Cl'], ['C4',", "* decoy_sample_size + len(formulas) * len(target_adducts) < len(ions) <= len(formulas)", "'+Ag'], ['C2H2', '+H', '+Ar']], columns=['formula', 'tm', 'dm'], ) msm_df =", "= FDR( fdr_config=fdr_config, chem_mods=[], neutral_losses=[], target_adducts=target_adducts, analysis_version=1, ) fdr.decoy_adducts_selection(target_formulas=['H2O', 'C5H2OH'])", "0.3], ['C3', '+Cl', 0.25], ['C4', '+Co', 0.1], ], columns=['formula', 'modifier',", "1, 1, 1, 2, 2, 2, 3, 4, 4]) expected_fdr", "0 / 2, 1 / 5, 1 / 5, 1", "= run_fdr_ranking(target_scores, decoy_scores, 1, False, False) fdr_ros = run_fdr_ranking(target_scores, decoy_scores,", "= pd.DataFrame( [ ['H2O', '+H', 0.85], ['C2H2', 
'+H', 0.5], ['H2O',", "as pd from pandas.util.testing import assert_frame_equal from sm.engine.annotation.fdr import FDR,", "0.4], ['C2', '+H', 0.75, 0.4], ['C3', '+H', 0.5, 0.4], ['C4',", "def test_fdr_decoy_adduct_selection_saves_corr(): fdr = FDR( fdr_config=FDR_CONFIG, chem_mods=[], neutral_losses=[], target_adducts=['+H', '+K',", "['+He', '+Li']) def test_fdr_decoy_adduct_selection_saves_corr(): fdr = FDR( fdr_config=FDR_CONFIG, chem_mods=[], neutral_losses=[],", "], columns=['formula', 'modifier', 'msm'], ).assign(fdr=expected_fdrs) assert_frame_equal(fdr.estimate_fdr(msm_df, None), exp_sf_df) def test_estimate_fdr_digitize_works():", "[0.2, 0.8]), (3, [1 / 4, 2 / 3])]) def", "ta) for cm, nl, ta in product(['', *chem_mods], ['', *neutral_losses],", "'+Ar']], columns=['formula', 'tm', 'dm'], ) msm_df = pd.DataFrame( [ ['H2O',", "assert type(ions) == list # total number varies because different", "assert_frame_equal(fdr.estimate_fdr(msm_df, None), exp_sf_df) def test_estimate_fdr_digitize_works(): fdr_config = {**FDR_CONFIG, 'decoy_sample_size': 1}", "'tm', 'dm']).reset_index(drop=True), ) @pytest.mark.parametrize('analysis_version,expected_fdrs', [(1, [0.2, 0.8]), (3, [1 /", "'+Na', '[M]+'] target_modifiers = [ format_modifiers(cm, nl, ta) for cm,", "(formula, decoy_modifier) pair min_count = len(formulas) * len(target_modifiers) max_count =", "number varies because different (formula, modifier) pairs may receive the", "[1 / 4, 2 / 3])]) def test_estimate_fdr_returns_correct_df(analysis_version, expected_fdrs): fdr", "4, 4]) expected_fdr = n_decoys / n_targets expected_fdr_ros = (n_decoys", "/ 11, 4 / 11] ) fdr = run_fdr_ranking(target_scores, decoy_scores,", "'+Co']], columns=['formula', 'tm', 'dm'], ) msm_df = pd.DataFrame( [ ['C1',", "('H2O', '', '+Li'), ], columns=['formula', 'tm', 'dm'], ) fdr.decoy_adducts_selection(target_formulas=['H2O']) assert_frame_equal(", "'msm'], ).assign(fdr=expected_fdrs) assert_frame_equal(fdr.estimate_fdr(msm_df, None), exp_sf_df) def test_estimate_fdr_digitize_works(): fdr_config = {**FDR_CONFIG,", "* len(target_modifiers) max_count = len(formulas) * len(target_modifiers) * (1 +", "3])]) def test_estimate_fdr_returns_correct_df(analysis_version, expected_fdrs): fdr = FDR( fdr_config=FDR_CONFIG, chem_mods=[], neutral_losses=[],", ") msm_df = pd.DataFrame( [ ['C1', '+H', 1.0], ['C2', '+H',", "0.8] fdr.td_df = pd.DataFrame( [['H2O', '+H', '+Cu'], ['H2O', '+H', '+Co'],", "'modifier', 'msm'], ).assign(fdr=expected_fdrs) assert_frame_equal(fdr.estimate_fdr(msm_df, None), exp_sf_df) def test_estimate_fdr_digitize_works(): fdr_config =", "'C5H2OH'] chem_mods = ['-H+C'] neutral_losses = ['-O', '-C'] target_adducts =", "10, 11]) n_decoys = pd.Series([0, 0, 1, 1, 1, 2,", "'+Cl', 0.25], ['C4', '+Co', 0.1], ], columns=['formula', 'modifier', 'msm'], )", "len(formulas) * len(target_adducts) < len(ions) <= len(formulas) * len(target_adducts) *", "in product(['', *chem_mods], ['', *neutral_losses], target_adducts) ] decoy_sample_size = 5", "import format_modifiers FDR_CONFIG = {'decoy_sample_size': 2} @patch('sm.engine.annotation.fdr.DECOY_ADDUCTS', ['+He', '+Li']) def", "assert set(target_ions).issubset(set(map(tuple, ions))) def test_chem_mods_and_neutral_losses(): formulas = ['H2O', 'C5H2OH'] chem_mods", "{'decoy_sample_size': 2} @patch('sm.engine.annotation.fdr.DECOY_ADDUCTS', ['+He', '+Li']) def test_fdr_decoy_adduct_selection_saves_corr(): fdr = FDR(", "0.5], ['H2O', '+Co', 0.5], ['C2H2', '+Ag', 0.75], ['C2H2', '+Ar', 0.0],", "chem_mods=[], neutral_losses=[], 
target_adducts=['+H'], analysis_version=analysis_version, ) fdr.fdr_levels = [0.2, 0.8] fdr.td_df", "'+Cu', 0.75], ['C2', '+Ag', 0.3], ['C3', '+Cl', 0.25], ['C4', '+Co',", "pd.DataFrame( [ ['C1', '+H', 1.0, 0.4], ['C2', '+H', 0.75, 0.4],", "4 / 11] ) fdr = run_fdr_ranking(target_scores, decoy_scores, 1, False,", "/ 5, 1 / 5, 2 / 8, 2 /", "0.5, 0.4], ['C4', '+H', 0.25, 0.8], ], columns=['formula', 'modifier', 'msm',", "min_count < len(ions) <= max_count target_ions = list(product(formulas, target_modifiers)) assert", "decoy_modifier) pair min_count = len(formulas) * len(target_modifiers) max_count = len(formulas)", "ions = fdr.ion_tuples() assert type(ions) == list # total number", "exp_sf_df = pd.DataFrame( [ ['C1', '+H', 1.0, 0.4], ['C2', '+H',", "total number varies because different (formula, modifier) pairs may receive", "* len(target_adducts) ) target_ions = [(formula, adduct) for formula, adduct", "fdr_config = {**FDR_CONFIG, 'decoy_sample_size': decoy_sample_size} fdr = FDR( fdr_config=fdr_config, chem_mods=chem_mods,", ") target_ions = [(formula, adduct) for formula, adduct in product(formulas,", "expected_fdrs): fdr = FDR( fdr_config=FDR_CONFIG, chem_mods=[], neutral_losses=[], target_adducts=['+H'], analysis_version=analysis_version, )", "decoy_sample_size} fdr = FDR( fdr_config=fdr_config, chem_mods=chem_mods, neutral_losses=neutral_losses, target_adducts=target_adducts, analysis_version=1, )", "2 / 8, 2 / 8, 2 / 8, 3", "[ ['C1', '+H', 1.0], ['C2', '+H', 0.75], ['C3', '+H', 0.5],", "'+Cl'], ['C4', '+H', '+Co']], columns=['formula', 'tm', 'dm'], ) msm_df =", "fdr = FDR( fdr_config=fdr_config, chem_mods=chem_mods, neutral_losses=neutral_losses, target_adducts=target_adducts, analysis_version=1, ) fdr.decoy_adducts_selection(target_formulas=['H2O',", "0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0]) decoy_scores = pd.Series([0.8,", "'+H', 0.5], ['H2O', '+Cu', 0.5], ['H2O', '+Co', 0.5], ['C2H2', '+Ag',", "exp_sf_df = pd.DataFrame( [ ['H2O', '+H', 0.85], ['C2H2', '+H', 0.5],", "== list # total number varies because different (formula, modifier)", "(n_decoys + 1) / (n_targets + 1) expected_fdr_mono = pd.Series(", "@pytest.mark.parametrize('analysis_version,expected_fdrs', [(1, [0.2, 0.8]), (3, [1 / 4, 2 /", "def test_chem_mods_and_neutral_losses(): formulas = ['H2O', 'C5H2OH'] chem_mods = ['-H+C'] neutral_losses", "'+Na'] decoy_sample_size = 5 fdr_config = {**FDR_CONFIG, 'decoy_sample_size': decoy_sample_size} fdr", "['C2', '+Ag', 0.3], ['C3', '+Cl', 0.25], ['C4', '+Co', 0.1], ],", "] decoy_sample_size = 5 fdr_config = {**FDR_CONFIG, 'decoy_sample_size': decoy_sample_size} fdr", "['C2', '+H', '+Ag'], ['C3', '+H', '+Cl'], ['C4', '+H', '+Co']], columns=['formula',", "= ['-O', '-C'] target_adducts = ['+H', '+Na', '[M]+'] target_modifiers =", "'-C'] target_adducts = ['+H', '+Na', '[M]+'] target_modifiers = [ format_modifiers(cm,", "neutral_losses=[], target_adducts=['+H'], analysis_version=analysis_version, ) fdr.fdr_levels = [0.2, 0.8] fdr.td_df =", "len(target_adducts) ) target_ions = [(formula, adduct) for formula, adduct in", "decoy_scores, 1, True, False) fdr_mono = run_fdr_ranking(target_scores, decoy_scores, 1, False,", "8, 3 / 9, 4 / 11, 4 / 11]", "= (n_decoys + 1) / (n_targets + 1) expected_fdr_mono =", "False, True) assert np.isclose(fdr, expected_fdr).all() assert np.isclose(fdr_ros, expected_fdr_ros).all() assert np.isclose(fdr_mono,", "max_count target_ions = list(product(formulas, target_modifiers)) assert set(target_ions).issubset(set(map(tuple, ions))) def test_run_fdr_ranking():", 
"fdr.fdr_levels = [0.2, 0.8] fdr.td_df = pd.DataFrame( [['H2O', '+H', '+Cu'],", "'+Li'), ], columns=['formula', 'tm', 'dm'], ) fdr.decoy_adducts_selection(target_formulas=['H2O']) assert_frame_equal( fdr.td_df.sort_values(by=['formula', 'tm',", "'+H', 0.5], ['C4', '+H', 0.25], ['C1', '+Cu', 0.75], ['C2', '+Ag',", ") fdr.fdr_levels = [0.4, 0.8] fdr.td_df = pd.DataFrame( [['C1', '+H',", "the same (formula, decoy_modifier) pair assert ( len(formulas) * decoy_sample_size", "= {**FDR_CONFIG, 'decoy_sample_size': 1} fdr = FDR( fdr_config=fdr_config, chem_mods=[], neutral_losses=[],", "assert_frame_equal( fdr.td_df.sort_values(by=['formula', 'tm', 'dm']).reset_index(drop=True), exp_target_decoy_df.sort_values(by=['formula', 'tm', 'dm']).reset_index(drop=True), ) @pytest.mark.parametrize('analysis_version,expected_fdrs', [(1,", "3, 4, 4]) expected_fdr = n_decoys / n_targets expected_fdr_ros =", "varies because different (formula, modifier) pairs may receive the same", "'+Li']) def test_fdr_decoy_adduct_selection_saves_corr(): fdr = FDR( fdr_config=FDR_CONFIG, chem_mods=[], neutral_losses=[], target_adducts=['+H',", "FDR, run_fdr_ranking from sm.engine.formula_parser import format_modifiers FDR_CONFIG = {'decoy_sample_size': 2}", "2, 2, 3, 4, 4]) expected_fdr = n_decoys / n_targets", "'+Cu', 0.5], ['H2O', '+Co', 0.5], ['C2H2', '+Ag', 0.75], ['C2H2', '+Ar',", "0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0])", "'+Ag', 0.3], ['C3', '+Cl', 0.25], ['C4', '+Co', 0.1], ], columns=['formula',", "['H2O', 'C5H2OH'] target_adducts = ['+H', '+Na'] decoy_sample_size = 5 fdr_config", "('H2O', '', '+He'), ('H2O', '', '+Li'), ], columns=['formula', 'tm', 'dm'],", "= FDR( fdr_config=fdr_config, chem_mods=[], neutral_losses=[], target_adducts=['+H'], analysis_version=1, ) fdr.fdr_levels =", "target_adducts=['+H'], analysis_version=1, ) fdr.fdr_levels = [0.4, 0.8] fdr.td_df = pd.DataFrame(", "['H2O', 'C5H2OH'] chem_mods = ['-H+C'] neutral_losses = ['-O', '-C'] target_adducts", "['C2H2', '+H', '+Ar']], columns=['formula', 'tm', 'dm'], ) msm_df = pd.DataFrame(", "pd.Series([0, 0, 1, 1, 1, 2, 2, 2, 3, 4,", ") @pytest.mark.parametrize('analysis_version,expected_fdrs', [(1, [0.2, 0.8]), (3, [1 / 4, 2", "0.25, 0.8], ], columns=['formula', 'modifier', 'msm', 'fdr'], ) assert_frame_equal(fdr.estimate_fdr(msm_df, None),", "'+H', 0.5, 0.4], ['C4', '+H', 0.25, 0.8], ], columns=['formula', 'modifier',", "FDR( fdr_config=FDR_CONFIG, chem_mods=[], neutral_losses=[], target_adducts=['+H'], analysis_version=analysis_version, ) fdr.fdr_levels = [0.2,", "= pd.Series([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2,", ") exp_sf_df = pd.DataFrame( [ ['H2O', '+H', 0.85], ['C2H2', '+H',", "['C2H2', '+H', 0.5], ], columns=['formula', 'modifier', 'msm'], ).assign(fdr=expected_fdrs) assert_frame_equal(fdr.estimate_fdr(msm_df, None),", "0.25], ['C4', '+Co', 0.1], ], columns=['formula', 'modifier', 'msm'], ) exp_sf_df", "4]) expected_fdr = n_decoys / n_targets expected_fdr_ros = (n_decoys +", "['C3', '+H', '+Cl'], ['C4', '+H', '+Co']], columns=['formula', 'tm', 'dm'], )", "'+Li'), ('H2O', '', '+He'), ('H2O', '', '+Li'), ], columns=['formula', 'tm',", "modifier) pairs may receive the same (formula, decoy_modifier) pair assert", "[ ['H2O', '+H', 0.85], ['C2H2', '+H', 0.5], ], columns=['formula', 'modifier',", "= FDR( fdr_config=FDR_CONFIG, chem_mods=[], neutral_losses=[], target_adducts=['+H', '+K', '[M]+'], analysis_version=1, )", "('H2O', '+H', '+He'), ('H2O', '+H', '+Li'), ('H2O', '+K', '+He'), ('H2O',", "2 / 8, 3 / 9, 4 / 11, 4", 
"test_estimate_fdr_digitize_works(): fdr_config = {**FDR_CONFIG, 'decoy_sample_size': 1} fdr = FDR( fdr_config=fdr_config,", "'+H', '+He'), ('H2O', '+H', '+Li'), ('H2O', '+K', '+He'), ('H2O', '+K',", "= len(formulas) * len(target_modifiers) * (1 + decoy_sample_size) assert min_count", "[ ['C1', '+H', 1.0, 0.4], ['C2', '+H', 0.75, 0.4], ['C3',", "+ len(formulas) * len(target_adducts) ) target_ions = [(formula, adduct) for", "['+H', '+Na'] decoy_sample_size = 5 fdr_config = {**FDR_CONFIG, 'decoy_sample_size': decoy_sample_size}", "decoy_modifier) pair assert ( len(formulas) * decoy_sample_size + len(formulas) *", "8, 2 / 8, 2 / 8, 3 / 9,", "1, False, True) assert np.isclose(fdr, expected_fdr).all() assert np.isclose(fdr_ros, expected_fdr_ros).all() assert", "[ ('H2O', '+H', '+He'), ('H2O', '+H', '+Li'), ('H2O', '+K', '+He'),", "= FDR( fdr_config=FDR_CONFIG, chem_mods=[], neutral_losses=[], target_adducts=['+H'], analysis_version=analysis_version, ) fdr.fdr_levels =", "columns=['formula', 'tm', 'dm'], ) fdr.decoy_adducts_selection(target_formulas=['H2O']) assert_frame_equal( fdr.td_df.sort_values(by=['formula', 'tm', 'dm']).reset_index(drop=True), exp_target_decoy_df.sort_values(by=['formula',", "'tm', 'dm'], ) msm_df = pd.DataFrame( [ ['C1', '+H', 1.0],", "{**FDR_CONFIG, 'decoy_sample_size': decoy_sample_size} fdr = FDR( fdr_config=fdr_config, chem_mods=[], neutral_losses=[], target_adducts=target_adducts,", "= ['-H+C'] neutral_losses = ['-O', '-C'] target_adducts = ['+H', '+Na',", "fdr.fdr_levels = [0.4, 0.8] fdr.td_df = pd.DataFrame( [['C1', '+H', '+Cu'],", "/ (n_targets + 1) expected_fdr_mono = pd.Series( [0 / 2,", "FDR( fdr_config=FDR_CONFIG, chem_mods=[], neutral_losses=[], target_adducts=['+H', '+K', '[M]+'], analysis_version=1, ) exp_target_decoy_df", "0.8]), (3, [1 / 4, 2 / 3])]) def test_estimate_fdr_returns_correct_df(analysis_version,", "formulas = ['H2O', 'C5H2OH'] chem_mods = ['-H+C'] neutral_losses = ['-O',", "pandas.util.testing import assert_frame_equal from sm.engine.annotation.fdr import FDR, run_fdr_ranking from sm.engine.formula_parser", "receive the same (formula, decoy_modifier) pair assert ( len(formulas) *", "from unittest.mock import patch import pytest import numpy as np", ") fdr.decoy_adducts_selection(target_formulas=['H2O', 'C5H2OH']) ions = fdr.ion_tuples() assert type(ions) == list", "'+Ar', 0.0], ], columns=['formula', 'modifier', 'msm'], ) exp_sf_df = pd.DataFrame(", "fdr.td_df = pd.DataFrame( [['H2O', '+H', '+Cu'], ['H2O', '+H', '+Co'], ['C2H2',", "['-O', '-C'] target_adducts = ['+H', '+Na', '[M]+'] target_modifiers = [", "in product(formulas, target_adducts)] assert set(target_ions).issubset(set(map(tuple, ions))) def test_chem_mods_and_neutral_losses(): formulas =", "msm_df = pd.DataFrame( [ ['H2O', '+H', 0.85], ['C2H2', '+H', 0.5],", "import FDR, run_fdr_ranking from sm.engine.formula_parser import format_modifiers FDR_CONFIG = {'decoy_sample_size':", "len(formulas) * len(target_modifiers) max_count = len(formulas) * len(target_modifiers) * (1", "product from unittest.mock import patch import pytest import numpy as", "True) assert np.isclose(fdr, expected_fdr).all() assert np.isclose(fdr_ros, expected_fdr_ros).all() assert np.isclose(fdr_mono, expected_fdr_mono).all()", "target_adducts=target_adducts, analysis_version=1, ) fdr.decoy_adducts_selection(target_formulas=['H2O', 'C5H2OH']) ions = fdr.ion_tuples() assert type(ions)", "False, False) fdr_ros = run_fdr_ranking(target_scores, decoy_scores, 1, True, False) fdr_mono", "2, 2, 2, 3, 4, 4]) expected_fdr 
= n_decoys /", "adduct) for formula, adduct in product(formulas, target_adducts)] assert set(target_ions).issubset(set(map(tuple, ions)))", "import patch import pytest import numpy as np import pandas", "'[M]+'] target_modifiers = [ format_modifiers(cm, nl, ta) for cm, nl,", "1, True, False) fdr_mono = run_fdr_ranking(target_scores, decoy_scores, 1, False, True)", "pair min_count = len(formulas) * len(target_modifiers) max_count = len(formulas) *", "patch import pytest import numpy as np import pandas as", "'modifier', 'msm'], ) exp_sf_df = pd.DataFrame( [ ['H2O', '+H', 0.85],", "len(target_modifiers) * (1 + decoy_sample_size) assert min_count < len(ions) <=", "'+K', '[M]+'], analysis_version=1, ) exp_target_decoy_df = pd.DataFrame( [ ('H2O', '+H',", "0.0], ], columns=['formula', 'modifier', 'msm'], ) exp_sf_df = pd.DataFrame( [", "/ 9, 4 / 11, 4 / 11] ) fdr", "pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9, 10,", "'dm'], ) fdr.decoy_adducts_selection(target_formulas=['H2O']) assert_frame_equal( fdr.td_df.sort_values(by=['formula', 'tm', 'dm']).reset_index(drop=True), exp_target_decoy_df.sort_values(by=['formula', 'tm', 'dm']).reset_index(drop=True),", "decoy_scores, 1, False, False) fdr_ros = run_fdr_ranking(target_scores, decoy_scores, 1, True,", "len(ions) <= max_count target_ions = list(product(formulas, target_modifiers)) assert set(target_ions).issubset(set(map(tuple, ions)))", "2, 3, 4, 4]) expected_fdr = n_decoys / n_targets expected_fdr_ros", "1 / 5, 1 / 5, 2 / 8, 2", "* len(target_adducts) < len(ions) <= len(formulas) * len(target_adducts) * decoy_sample_size", "0.1, 0.0]) decoy_scores = pd.Series([0.8, 0.55, 0.2, 0.1]) n_targets =", "'dm']).reset_index(drop=True), ) @pytest.mark.parametrize('analysis_version,expected_fdrs', [(1, [0.2, 0.8]), (3, [1 / 4,", "*neutral_losses], target_adducts) ] decoy_sample_size = 5 fdr_config = {**FDR_CONFIG, 'decoy_sample_size':", "0, 1, 1, 1, 2, 2, 2, 3, 4, 4])", "(1 + decoy_sample_size) assert min_count < len(ions) <= max_count target_ions", "analysis_version=1, ) fdr.fdr_levels = [0.4, 0.8] fdr.td_df = pd.DataFrame( [['C1',", "n_decoys / n_targets expected_fdr_ros = (n_decoys + 1) / (n_targets", "fdr = run_fdr_ranking(target_scores, decoy_scores, 1, False, False) fdr_ros = run_fdr_ranking(target_scores,", "different (formula, modifier) pairs may receive the same (formula, decoy_modifier)", "FDR( fdr_config=fdr_config, chem_mods=chem_mods, neutral_losses=neutral_losses, target_adducts=target_adducts, analysis_version=1, ) fdr.decoy_adducts_selection(target_formulas=['H2O', 'C5H2OH']) ions", "assert ( len(formulas) * decoy_sample_size + len(formulas) * len(target_adducts) <", "['C1', '+H', 1.0], ['C2', '+H', 0.75], ['C3', '+H', 0.5], ['C4',", "'+H', '+Cu'], ['H2O', '+H', '+Co'], ['C2H2', '+H', '+Ag'], ['C2H2', '+H',", ") fdr = run_fdr_ranking(target_scores, decoy_scores, 1, False, False) fdr_ros =", "'tm', 'dm'], ) fdr.decoy_adducts_selection(target_formulas=['H2O']) assert_frame_equal( fdr.td_df.sort_values(by=['formula', 'tm', 'dm']).reset_index(drop=True), exp_target_decoy_df.sort_values(by=['formula', 'tm',", "'dm']).reset_index(drop=True), exp_target_decoy_df.sort_values(by=['formula', 'tm', 'dm']).reset_index(drop=True), ) @pytest.mark.parametrize('analysis_version,expected_fdrs', [(1, [0.2, 0.8]), (3,", "0.4, 0.3, 0.2, 0.1, 0.0]) decoy_scores = pd.Series([0.8, 0.55, 0.2,", "'+H', '+Li'), ('H2O', '+K', '+He'), ('H2O', '+K', '+Li'), ('H2O', '',", "= ['H2O', 'C5H2OH'] target_adducts = ['+H', '+Na'] decoy_sample_size = 5", "= ['H2O', 'C5H2OH'] 
chem_mods = ['-H+C'] neutral_losses = ['-O', '-C']", "(n_targets + 1) expected_fdr_mono = pd.Series( [0 / 2, 0", "'+H', 0.85], ['C2H2', '+H', 0.5], ], columns=['formula', 'modifier', 'msm'], ).assign(fdr=expected_fdrs)", "0.75, 0.4], ['C3', '+H', 0.5, 0.4], ['C4', '+H', 0.25, 0.8],", "0.4], ['C3', '+H', 0.5, 0.4], ['C4', '+H', 0.25, 0.8], ],", "fdr = FDR( fdr_config=FDR_CONFIG, chem_mods=[], neutral_losses=[], target_adducts=['+H'], analysis_version=analysis_version, ) fdr.fdr_levels", "target_scores = pd.Series([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3,", "= pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9,", "assert set(target_ions).issubset(set(map(tuple, ions))) def test_run_fdr_ranking(): target_scores = pd.Series([1.0, 0.9, 0.8,", "0.2, 0.1, 0.0]) decoy_scores = pd.Series([0.8, 0.55, 0.2, 0.1]) n_targets", "= pd.Series([0.8, 0.55, 0.2, 0.1]) n_targets = pd.Series([1, 2, 3,", "0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0]) decoy_scores =", "2} @patch('sm.engine.annotation.fdr.DECOY_ADDUCTS', ['+He', '+Li']) def test_fdr_decoy_adduct_selection_saves_corr(): fdr = FDR( fdr_config=FDR_CONFIG,", "0.5], ], columns=['formula', 'modifier', 'msm'], ).assign(fdr=expected_fdrs) assert_frame_equal(fdr.estimate_fdr(msm_df, None), exp_sf_df) def", "= {**FDR_CONFIG, 'decoy_sample_size': decoy_sample_size} fdr = FDR( fdr_config=fdr_config, chem_mods=chem_mods, neutral_losses=neutral_losses,", "columns=['formula', 'modifier', 'msm'], ) exp_sf_df = pd.DataFrame( [ ['H2O', '+H',", "pd.DataFrame( [ ['H2O', '+H', 0.85], ['C2H2', '+H', 0.5], ], columns=['formula',", "test_estimate_fdr_returns_correct_df(analysis_version, expected_fdrs): fdr = FDR( fdr_config=FDR_CONFIG, chem_mods=[], neutral_losses=[], target_adducts=['+H'], analysis_version=analysis_version,", "pairs may receive the same (formula, decoy_modifier) pair assert (", "['C1', '+H', 1.0, 0.4], ['C2', '+H', 0.75, 0.4], ['C3', '+H',", "fdr.decoy_adducts_selection(target_formulas=['H2O']) assert_frame_equal( fdr.td_df.sort_values(by=['formula', 'tm', 'dm']).reset_index(drop=True), exp_target_decoy_df.sort_values(by=['formula', 'tm', 'dm']).reset_index(drop=True), ) @pytest.mark.parametrize('analysis_version,expected_fdrs',", "nl, ta in product(['', *chem_mods], ['', *neutral_losses], target_adducts) ] decoy_sample_size", "n_targets expected_fdr_ros = (n_decoys + 1) / (n_targets + 1)", "0.75], ['C3', '+H', 0.5], ['C4', '+H', 0.25], ['C1', '+Cu', 0.75],", "itertools import product from unittest.mock import patch import pytest import", "0.25], ['C1', '+Cu', 0.75], ['C2', '+Ag', 0.3], ['C3', '+Cl', 0.25],", "/ 8, 2 / 8, 3 / 9, 4 /", "False) fdr_mono = run_fdr_ranking(target_scores, decoy_scores, 1, False, True) assert np.isclose(fdr,", "'decoy_sample_size': decoy_sample_size} fdr = FDR( fdr_config=fdr_config, chem_mods=chem_mods, neutral_losses=neutral_losses, target_adducts=target_adducts, analysis_version=1,", "8, 2 / 8, 3 / 9, 4 / 11,", "modifier) pairs may receive the same (formula, decoy_modifier) pair min_count", "'tm', 'dm']).reset_index(drop=True), exp_target_decoy_df.sort_values(by=['formula', 'tm', 'dm']).reset_index(drop=True), ) @pytest.mark.parametrize('analysis_version,expected_fdrs', [(1, [0.2, 0.8]),", "['H2O', '+H', 0.85], ['C2H2', '+H', 0.5], ], columns=['formula', 'modifier', 'msm'],", "'+H', '+Co']], columns=['formula', 'tm', 'dm'], ) msm_df = pd.DataFrame( [", "fdr_ros = run_fdr_ranking(target_scores, decoy_scores, 1, True, False) fdr_mono = run_fdr_ranking(target_scores,", "'+Li'), ('H2O', '+K', '+He'), ('H2O', '+K', '+Li'), ('H2O', '', '+He'),", 
"pd.DataFrame( [['C1', '+H', '+Cu'], ['C2', '+H', '+Ag'], ['C3', '+H', '+Cl'],", "= run_fdr_ranking(target_scores, decoy_scores, 1, False, True) assert np.isclose(fdr, expected_fdr).all() assert", "= [ format_modifiers(cm, nl, ta) for cm, nl, ta in", "/ 5, 1 / 5, 1 / 5, 2 /", "pd.DataFrame( [ ['C1', '+H', 1.0], ['C2', '+H', 0.75], ['C3', '+H',", "exp_sf_df) def test_estimate_fdr_digitize_works(): fdr_config = {**FDR_CONFIG, 'decoy_sample_size': 1} fdr =", "0.3, 0.2, 0.1, 0.0]) decoy_scores = pd.Series([0.8, 0.55, 0.2, 0.1])", "the same (formula, decoy_modifier) pair min_count = len(formulas) * len(target_modifiers)", "* decoy_sample_size + len(formulas) * len(target_adducts) ) target_ions = [(formula,", "pair assert ( len(formulas) * decoy_sample_size + len(formulas) * len(target_adducts)", "/ 11] ) fdr = run_fdr_ranking(target_scores, decoy_scores, 1, False, False)", "11] ) fdr = run_fdr_ranking(target_scores, decoy_scores, 1, False, False) fdr_ros", "( len(formulas) * decoy_sample_size + len(formulas) * len(target_adducts) < len(ions)", "'+H', 0.25, 0.8], ], columns=['formula', 'modifier', 'msm', 'fdr'], ) assert_frame_equal(fdr.estimate_fdr(msm_df,", "decoy_sample_size} fdr = FDR( fdr_config=fdr_config, chem_mods=[], neutral_losses=[], target_adducts=target_adducts, analysis_version=1, )", "0.75], ['C2', '+Ag', 0.3], ['C3', '+Cl', 0.25], ['C4', '+Co', 0.1],", "0.85], ['C2H2', '+H', 0.5], ['H2O', '+Cu', 0.5], ['H2O', '+Co', 0.5],", "5, 2 / 8, 2 / 8, 2 / 8,", "'+K', '+He'), ('H2O', '+K', '+Li'), ('H2O', '', '+He'), ('H2O', '',", "'decoy_sample_size': decoy_sample_size} fdr = FDR( fdr_config=fdr_config, chem_mods=[], neutral_losses=[], target_adducts=target_adducts, analysis_version=1,", "8, 9, 10, 11]) n_decoys = pd.Series([0, 0, 1, 1,", "['C2', '+H', 0.75, 0.4], ['C3', '+H', 0.5, 0.4], ['C4', '+H',", "numpy as np import pandas as pd from pandas.util.testing import", "fdr_config = {**FDR_CONFIG, 'decoy_sample_size': decoy_sample_size} fdr = FDR( fdr_config=fdr_config, chem_mods=[],", "'+H', 1.0, 0.4], ['C2', '+H', 0.75, 0.4], ['C3', '+H', 0.5,", "= fdr.ion_tuples() assert type(ions) == list # total number varies", "['C4', '+H', 0.25, 0.8], ], columns=['formula', 'modifier', 'msm', 'fdr'], )", "decoy_scores = pd.Series([0.8, 0.55, 0.2, 0.1]) n_targets = pd.Series([1, 2,", "'msm'], ) exp_sf_df = pd.DataFrame( [ ['H2O', '+H', 0.85], ['C2H2',", "target_modifiers)) assert set(target_ions).issubset(set(map(tuple, ions))) def test_run_fdr_ranking(): target_scores = pd.Series([1.0, 0.9,", "= pd.DataFrame( [['C1', '+H', '+Cu'], ['C2', '+H', '+Ag'], ['C3', '+H',", "list # total number varies because different (formula, modifier) pairs", "exp_target_decoy_df.sort_values(by=['formula', 'tm', 'dm']).reset_index(drop=True), ) @pytest.mark.parametrize('analysis_version,expected_fdrs', [(1, [0.2, 0.8]), (3, [1", "'+Cu'], ['H2O', '+H', '+Co'], ['C2H2', '+H', '+Ag'], ['C2H2', '+H', '+Ar']],", "pairs may receive the same (formula, decoy_modifier) pair min_count =", "'+He'), ('H2O', '+H', '+Li'), ('H2O', '+K', '+He'), ('H2O', '+K', '+Li'),", "['C2H2', '+Ag', 0.75], ['C2H2', '+Ar', 0.0], ], columns=['formula', 'modifier', 'msm'],", "same (formula, decoy_modifier) pair assert ( len(formulas) * decoy_sample_size +", "ta in product(['', *chem_mods], ['', *neutral_losses], target_adducts) ] decoy_sample_size =", "ions))) def test_run_fdr_ranking(): target_scores = pd.Series([1.0, 0.9, 0.8, 0.7, 0.6,", "analysis_version=1, ) fdr.decoy_adducts_selection(target_formulas=['H2O', 'C5H2OH']) ions = fdr.ion_tuples() 
assert type(ions) ==", "= list(product(formulas, target_modifiers)) assert set(target_ions).issubset(set(map(tuple, ions))) def test_run_fdr_ranking(): target_scores =", "'fdr'], ) assert_frame_equal(fdr.estimate_fdr(msm_df, None), exp_sf_df) def test_ions(): formulas = ['H2O',", "1, 2, 2, 2, 3, 4, 4]) expected_fdr = n_decoys", ") fdr.decoy_adducts_selection(target_formulas=['H2O']) assert_frame_equal( fdr.td_df.sort_values(by=['formula', 'tm', 'dm']).reset_index(drop=True), exp_target_decoy_df.sort_values(by=['formula', 'tm', 'dm']).reset_index(drop=True), )", "*chem_mods], ['', *neutral_losses], target_adducts) ] decoy_sample_size = 5 fdr_config =", "* len(target_modifiers) * (1 + decoy_sample_size) assert min_count < len(ions)", "0.85], ['C2H2', '+H', 0.5], ], columns=['formula', 'modifier', 'msm'], ).assign(fdr=expected_fdrs) assert_frame_equal(fdr.estimate_fdr(msm_df,", "['C2H2', '+H', '+Ag'], ['C2H2', '+H', '+Ar']], columns=['formula', 'tm', 'dm'], )", "columns=['formula', 'tm', 'dm'], ) msm_df = pd.DataFrame( [ ['H2O', '+H',", "fdr_config = {**FDR_CONFIG, 'decoy_sample_size': 1} fdr = FDR( fdr_config=fdr_config, chem_mods=[],", ").assign(fdr=expected_fdrs) assert_frame_equal(fdr.estimate_fdr(msm_df, None), exp_sf_df) def test_estimate_fdr_digitize_works(): fdr_config = {**FDR_CONFIG, 'decoy_sample_size':", "0.5], ['C4', '+H', 0.25], ['C1', '+Cu', 0.75], ['C2', '+Ag', 0.3],", "['C2H2', '+Ar', 0.0], ], columns=['formula', 'modifier', 'msm'], ) exp_sf_df =", "'+H', 0.85], ['C2H2', '+H', 0.5], ['H2O', '+Cu', 0.5], ['H2O', '+Co',", "fdr_config=fdr_config, chem_mods=chem_mods, neutral_losses=neutral_losses, target_adducts=target_adducts, analysis_version=1, ) fdr.decoy_adducts_selection(target_formulas=['H2O', 'C5H2OH']) ions =", "<= len(formulas) * len(target_adducts) * decoy_sample_size + len(formulas) * len(target_adducts)", "+ 1) / (n_targets + 1) expected_fdr_mono = pd.Series( [0", "None), exp_sf_df) def test_estimate_fdr_digitize_works(): fdr_config = {**FDR_CONFIG, 'decoy_sample_size': 1} fdr", "= pd.DataFrame( [ ['H2O', '+H', 0.85], ['C2H2', '+H', 0.5], ],", "'+H', '+Co'], ['C2H2', '+H', '+Ag'], ['C2H2', '+H', '+Ar']], columns=['formula', 'tm',", "sm.engine.annotation.fdr import FDR, run_fdr_ranking from sm.engine.formula_parser import format_modifiers FDR_CONFIG =", "run_fdr_ranking(target_scores, decoy_scores, 1, False, True) assert np.isclose(fdr, expected_fdr).all() assert np.isclose(fdr_ros,", "2, 1 / 5, 1 / 5, 1 / 5,", "0.55, 0.2, 0.1]) n_targets = pd.Series([1, 2, 3, 4, 5,", "(3, [1 / 4, 2 / 3])]) def test_estimate_fdr_returns_correct_df(analysis_version, expected_fdrs):", "'+H', 0.25], ['C1', '+Cu', 0.75], ['C2', '+Ag', 0.3], ['C3', '+Cl',", "/ 2, 0 / 2, 1 / 5, 1 /", "n_decoys = pd.Series([0, 0, 1, 1, 1, 2, 2, 2,", "len(formulas) * len(target_adducts) * decoy_sample_size + len(formulas) * len(target_adducts) )", "/ 4, 2 / 3])]) def test_estimate_fdr_returns_correct_df(analysis_version, expected_fdrs): fdr =", "neutral_losses=[], target_adducts=['+H'], analysis_version=1, ) fdr.fdr_levels = [0.4, 0.8] fdr.td_df =", "pd.Series( [0 / 2, 0 / 2, 1 / 5,", "chem_mods=[], neutral_losses=[], target_adducts=target_adducts, analysis_version=1, ) fdr.decoy_adducts_selection(target_formulas=['H2O', 'C5H2OH']) ions = fdr.ion_tuples()", "= pd.Series( [0 / 2, 0 / 2, 1 /", "= pd.DataFrame( [ ('H2O', '+H', '+He'), ('H2O', '+H', '+Li'), ('H2O',", "3 / 9, 4 / 11, 4 / 11] )", "pandas as pd from pandas.util.testing import assert_frame_equal from sm.engine.annotation.fdr import", "because 
different (formula, modifier) pairs may receive the same (formula,", "'+H', '+Ag'], ['C3', '+H', '+Cl'], ['C4', '+H', '+Co']], columns=['formula', 'tm',", "[(1, [0.2, 0.8]), (3, [1 / 4, 2 / 3])])", "format_modifiers FDR_CONFIG = {'decoy_sample_size': 2} @patch('sm.engine.annotation.fdr.DECOY_ADDUCTS', ['+He', '+Li']) def test_fdr_decoy_adduct_selection_saves_corr():", "= pd.DataFrame( [['H2O', '+H', '+Cu'], ['H2O', '+H', '+Co'], ['C2H2', '+H',", "], columns=['formula', 'tm', 'dm'], ) fdr.decoy_adducts_selection(target_formulas=['H2O']) assert_frame_equal( fdr.td_df.sort_values(by=['formula', 'tm', 'dm']).reset_index(drop=True),", "'+H', 0.75, 0.4], ['C3', '+H', 0.5, 0.4], ['C4', '+H', 0.25,", "0.4], ['C4', '+H', 0.25, 0.8], ], columns=['formula', 'modifier', 'msm', 'fdr'],", "may receive the same (formula, decoy_modifier) pair assert ( len(formulas)", "False) fdr_ros = run_fdr_ranking(target_scores, decoy_scores, 1, True, False) fdr_mono =", "fdr_config=fdr_config, chem_mods=[], neutral_losses=[], target_adducts=['+H'], analysis_version=1, ) fdr.fdr_levels = [0.4, 0.8]", "'+H', '+Ar']], columns=['formula', 'tm', 'dm'], ) msm_df = pd.DataFrame( [", "2 / 3])]) def test_estimate_fdr_returns_correct_df(analysis_version, expected_fdrs): fdr = FDR( fdr_config=FDR_CONFIG,", "0.8] fdr.td_df = pd.DataFrame( [['C1', '+H', '+Cu'], ['C2', '+H', '+Ag'],", "[0 / 2, 0 / 2, 1 / 5, 1", "5, 1 / 5, 2 / 8, 2 / 8,", "/ 8, 3 / 9, 4 / 11, 4 /", "decoy_sample_size = 5 fdr_config = {**FDR_CONFIG, 'decoy_sample_size': decoy_sample_size} fdr =", "nl, ta) for cm, nl, ta in product(['', *chem_mods], ['',", "# total number varies because different (formula, modifier) pairs may", "test_ions(): formulas = ['H2O', 'C5H2OH'] target_adducts = ['+H', '+Na'] decoy_sample_size", "0.0]) decoy_scores = pd.Series([0.8, 0.55, 0.2, 0.1]) n_targets = pd.Series([1,", "+ decoy_sample_size) assert min_count < len(ions) <= max_count target_ions =", "+ 1) expected_fdr_mono = pd.Series( [0 / 2, 0 /", "'C5H2OH']) ions = fdr.ion_tuples() assert type(ions) == list # total", "fdr.ion_tuples() assert type(ions) == list # total number varies because", "FDR_CONFIG = {'decoy_sample_size': 2} @patch('sm.engine.annotation.fdr.DECOY_ADDUCTS', ['+He', '+Li']) def test_fdr_decoy_adduct_selection_saves_corr(): fdr", ") msm_df = pd.DataFrame( [ ['H2O', '+H', 0.85], ['C2H2', '+H',", "[ ['H2O', '+H', 0.85], ['C2H2', '+H', 0.5], ['H2O', '+Cu', 0.5],", "= FDR( fdr_config=fdr_config, chem_mods=chem_mods, neutral_losses=neutral_losses, target_adducts=target_adducts, analysis_version=1, ) fdr.decoy_adducts_selection(target_formulas=['H2O', 'C5H2OH'])", "product(formulas, target_adducts)] assert set(target_ions).issubset(set(map(tuple, ions))) def test_chem_mods_and_neutral_losses(): formulas = ['H2O',", "def test_ions(): formulas = ['H2O', 'C5H2OH'] target_adducts = ['+H', '+Na']", "'[M]+'], analysis_version=1, ) exp_target_decoy_df = pd.DataFrame( [ ('H2O', '+H', '+He'),", "len(ions) <= len(formulas) * len(target_adducts) * decoy_sample_size + len(formulas) *", "= [0.4, 0.8] fdr.td_df = pd.DataFrame( [['C1', '+H', '+Cu'], ['C2',", "], columns=['formula', 'modifier', 'msm', 'fdr'], ) assert_frame_equal(fdr.estimate_fdr(msm_df, None), exp_sf_df) def", "fdr_config=fdr_config, chem_mods=[], neutral_losses=[], target_adducts=target_adducts, analysis_version=1, ) fdr.decoy_adducts_selection(target_formulas=['H2O', 'C5H2OH']) ions =", "fdr_config=FDR_CONFIG, chem_mods=[], neutral_losses=[], target_adducts=['+H', '+K', '[M]+'], analysis_version=1, ) 
exp_target_decoy_df =", "import numpy as np import pandas as pd from pandas.util.testing", "fdr = FDR( fdr_config=fdr_config, chem_mods=[], neutral_losses=[], target_adducts=target_adducts, analysis_version=1, ) fdr.decoy_adducts_selection(target_formulas=['H2O',", "6, 7, 8, 9, 10, 11]) n_decoys = pd.Series([0, 0,", "adduct in product(formulas, target_adducts)] assert set(target_ions).issubset(set(map(tuple, ions))) def test_chem_mods_and_neutral_losses(): formulas", "decoy_scores, 1, False, True) assert np.isclose(fdr, expected_fdr).all() assert np.isclose(fdr_ros, expected_fdr_ros).all()", "0.8], ], columns=['formula', 'modifier', 'msm', 'fdr'], ) assert_frame_equal(fdr.estimate_fdr(msm_df, None), exp_sf_df)", "min_count = len(formulas) * len(target_modifiers) max_count = len(formulas) * len(target_modifiers)", "target_modifiers = [ format_modifiers(cm, nl, ta) for cm, nl, ta", "(formula, modifier) pairs may receive the same (formula, decoy_modifier) pair", "'+Co', 0.1], ], columns=['formula', 'modifier', 'msm'], ) exp_sf_df = pd.DataFrame(", "assert_frame_equal from sm.engine.annotation.fdr import FDR, run_fdr_ranking from sm.engine.formula_parser import format_modifiers", "chem_mods=chem_mods, neutral_losses=neutral_losses, target_adducts=target_adducts, analysis_version=1, ) fdr.decoy_adducts_selection(target_formulas=['H2O', 'C5H2OH']) ions = fdr.ion_tuples()", "= run_fdr_ranking(target_scores, decoy_scores, 1, True, False) fdr_mono = run_fdr_ranking(target_scores, decoy_scores,", "FDR( fdr_config=fdr_config, chem_mods=[], neutral_losses=[], target_adducts=target_adducts, analysis_version=1, ) fdr.decoy_adducts_selection(target_formulas=['H2O', 'C5H2OH']) ions", "= ['+H', '+Na'] decoy_sample_size = 5 fdr_config = {**FDR_CONFIG, 'decoy_sample_size':", "+ len(formulas) * len(target_adducts) < len(ions) <= len(formulas) * len(target_adducts)", ") fdr.fdr_levels = [0.2, 0.8] fdr.td_df = pd.DataFrame( [['H2O', '+H',", "target_adducts) ] decoy_sample_size = 5 fdr_config = {**FDR_CONFIG, 'decoy_sample_size': decoy_sample_size}", "expected_fdr_mono = pd.Series( [0 / 2, 0 / 2, 1", "decoy_sample_size + len(formulas) * len(target_adducts) ) target_ions = [(formula, adduct)", "from sm.engine.formula_parser import format_modifiers FDR_CONFIG = {'decoy_sample_size': 2} @patch('sm.engine.annotation.fdr.DECOY_ADDUCTS', ['+He',", "may receive the same (formula, decoy_modifier) pair min_count = len(formulas)", "1.0], ['C2', '+H', 0.75], ['C3', '+H', 0.5], ['C4', '+H', 0.25],", "'dm'], ) msm_df = pd.DataFrame( [ ['C1', '+H', 1.0], ['C2',", "'+Co'], ['C2H2', '+H', '+Ag'], ['C2H2', '+H', '+Ar']], columns=['formula', 'tm', 'dm'],", "['C2H2', '+H', 0.5], ['H2O', '+Cu', 0.5], ['H2O', '+Co', 0.5], ['C2H2',", "pd.DataFrame( [ ['H2O', '+H', 0.85], ['C2H2', '+H', 0.5], ['H2O', '+Cu',", "['C4', '+H', '+Co']], columns=['formula', 'tm', 'dm'], ) msm_df = pd.DataFrame(", "test_chem_mods_and_neutral_losses(): formulas = ['H2O', 'C5H2OH'] chem_mods = ['-H+C'] neutral_losses =", "import product from unittest.mock import patch import pytest import numpy", "= {**FDR_CONFIG, 'decoy_sample_size': decoy_sample_size} fdr = FDR( fdr_config=fdr_config, chem_mods=[], neutral_losses=[],", "analysis_version=1, ) exp_target_decoy_df = pd.DataFrame( [ ('H2O', '+H', '+He'), ('H2O',", "[0.2, 0.8] fdr.td_df = pd.DataFrame( [['H2O', '+H', '+Cu'], ['H2O', '+H',", "from itertools import product from unittest.mock import patch import pytest", "fdr_config=FDR_CONFIG, chem_mods=[], neutral_losses=[], target_adducts=['+H'], 
analysis_version=analysis_version, ) fdr.fdr_levels = [0.2, 0.8]", "'decoy_sample_size': 1} fdr = FDR( fdr_config=fdr_config, chem_mods=[], neutral_losses=[], target_adducts=['+H'], analysis_version=1,", "= [0.2, 0.8] fdr.td_df = pd.DataFrame( [['H2O', '+H', '+Cu'], ['H2O',", "def test_estimate_fdr_returns_correct_df(analysis_version, expected_fdrs): fdr = FDR( fdr_config=FDR_CONFIG, chem_mods=[], neutral_losses=[], target_adducts=['+H'],", "formula, adduct in product(formulas, target_adducts)] assert set(target_ions).issubset(set(map(tuple, ions))) def test_chem_mods_and_neutral_losses():", "1, 1, 2, 2, 2, 3, 4, 4]) expected_fdr =", "'+Ag', 0.75], ['C2H2', '+Ar', 0.0], ], columns=['formula', 'modifier', 'msm'], )", "True, False) fdr_mono = run_fdr_ranking(target_scores, decoy_scores, 1, False, True) assert", "['H2O', '+Cu', 0.5], ['H2O', '+Co', 0.5], ['C2H2', '+Ag', 0.75], ['C2H2',", "{**FDR_CONFIG, 'decoy_sample_size': decoy_sample_size} fdr = FDR( fdr_config=fdr_config, chem_mods=chem_mods, neutral_losses=neutral_losses, target_adducts=target_adducts,", "[0.4, 0.8] fdr.td_df = pd.DataFrame( [['C1', '+H', '+Cu'], ['C2', '+H',", "[(formula, adduct) for formula, adduct in product(formulas, target_adducts)] assert set(target_ions).issubset(set(map(tuple,", "0.1], ], columns=['formula', 'modifier', 'msm'], ) exp_sf_df = pd.DataFrame( [", "'+Cu'], ['C2', '+H', '+Ag'], ['C3', '+H', '+Cl'], ['C4', '+H', '+Co']],", "columns=['formula', 'tm', 'dm'], ) msm_df = pd.DataFrame( [ ['C1', '+H',", "decoy_sample_size + len(formulas) * len(target_adducts) < len(ions) <= len(formulas) *", "'+H', '+Ag'], ['C2H2', '+H', '+Ar']], columns=['formula', 'tm', 'dm'], ) msm_df", "= ['+H', '+Na', '[M]+'] target_modifiers = [ format_modifiers(cm, nl, ta)", "assert_frame_equal(fdr.estimate_fdr(msm_df, None), exp_sf_df) def test_ions(): formulas = ['H2O', 'C5H2OH'] target_adducts", "1, False, False) fdr_ros = run_fdr_ranking(target_scores, decoy_scores, 1, True, False)", "0.2, 0.1]) n_targets = pd.Series([1, 2, 3, 4, 5, 6,", "], columns=['formula', 'modifier', 'msm'], ) exp_sf_df = pd.DataFrame( [ ['C1',", "[ format_modifiers(cm, nl, ta) for cm, nl, ta in product(['',", "2, 0 / 2, 1 / 5, 1 / 5,", "fdr_mono = run_fdr_ranking(target_scores, decoy_scores, 1, False, True) assert np.isclose(fdr, expected_fdr).all()", "expected_fdr = n_decoys / n_targets expected_fdr_ros = (n_decoys + 1)", "len(formulas) * decoy_sample_size + len(formulas) * len(target_adducts) < len(ions) <=", "['C3', '+H', 0.5, 0.4], ['C4', '+H', 0.25, 0.8], ], columns=['formula',", "< len(ions) <= len(formulas) * len(target_adducts) * decoy_sample_size + len(formulas)", "['H2O', '+Co', 0.5], ['C2H2', '+Ag', 0.75], ['C2H2', '+Ar', 0.0], ],", "'msm'], ) exp_sf_df = pd.DataFrame( [ ['C1', '+H', 1.0, 0.4],", "= {'decoy_sample_size': 2} @patch('sm.engine.annotation.fdr.DECOY_ADDUCTS', ['+He', '+Li']) def test_fdr_decoy_adduct_selection_saves_corr(): fdr =", "('H2O', '+K', '+Li'), ('H2O', '', '+He'), ('H2O', '', '+Li'), ],", "fdr = FDR( fdr_config=FDR_CONFIG, chem_mods=[], neutral_losses=[], target_adducts=['+H', '+K', '[M]+'], analysis_version=1,", "target_adducts)] assert set(target_ions).issubset(set(map(tuple, ions))) def test_chem_mods_and_neutral_losses(): formulas = ['H2O', 'C5H2OH']", "'+H', '+Cl'], ['C4', '+H', '+Co']], columns=['formula', 'tm', 'dm'], ) msm_df", "'+H', 1.0], ['C2', '+H', 0.75], ['C3', '+H', 0.5], ['C4', '+H',", "'modifier', 'msm', 'fdr'], ) assert_frame_equal(fdr.estimate_fdr(msm_df, None), exp_sf_df) def test_ions(): formulas", 
"'+H', '+Cu'], ['C2', '+H', '+Ag'], ['C3', '+H', '+Cl'], ['C4', '+H',", "fdr.td_df = pd.DataFrame( [['C1', '+H', '+Cu'], ['C2', '+H', '+Ag'], ['C3',", "for formula, adduct in product(formulas, target_adducts)] assert set(target_ions).issubset(set(map(tuple, ions))) def", "receive the same (formula, decoy_modifier) pair min_count = len(formulas) *", "9, 10, 11]) n_decoys = pd.Series([0, 0, 1, 1, 1,", "'C5H2OH'] target_adducts = ['+H', '+Na'] decoy_sample_size = 5 fdr_config =", "11, 4 / 11] ) fdr = run_fdr_ranking(target_scores, decoy_scores, 1,", "from sm.engine.annotation.fdr import FDR, run_fdr_ranking from sm.engine.formula_parser import format_modifiers FDR_CONFIG", "4, 5, 6, 7, 8, 9, 10, 11]) n_decoys =", "'+He'), ('H2O', '', '+Li'), ], columns=['formula', 'tm', 'dm'], ) fdr.decoy_adducts_selection(target_formulas=['H2O'])", "msm_df = pd.DataFrame( [ ['C1', '+H', 1.0], ['C2', '+H', 0.75],", "/ 8, 2 / 8, 2 / 8, 3 /", "0.5], ['H2O', '+Cu', 0.5], ['H2O', '+Co', 0.5], ['C2H2', '+Ag', 0.75],", "target_adducts = ['+H', '+Na'] decoy_sample_size = 5 fdr_config = {**FDR_CONFIG,", "import pytest import numpy as np import pandas as pd", "@patch('sm.engine.annotation.fdr.DECOY_ADDUCTS', ['+He', '+Li']) def test_fdr_decoy_adduct_selection_saves_corr(): fdr = FDR( fdr_config=FDR_CONFIG, chem_mods=[],", "'+Ag'], ['C3', '+H', '+Cl'], ['C4', '+H', '+Co']], columns=['formula', 'tm', 'dm'],", "exp_target_decoy_df = pd.DataFrame( [ ('H2O', '+H', '+He'), ('H2O', '+H', '+Li'),", "'', '+He'), ('H2O', '', '+Li'), ], columns=['formula', 'tm', 'dm'], )", "[['H2O', '+H', '+Cu'], ['H2O', '+H', '+Co'], ['C2H2', '+H', '+Ag'], ['C2H2',", "neutral_losses=neutral_losses, target_adducts=target_adducts, analysis_version=1, ) fdr.decoy_adducts_selection(target_formulas=['H2O', 'C5H2OH']) ions = fdr.ion_tuples() assert", "'dm'], ) msm_df = pd.DataFrame( [ ['H2O', '+H', 0.85], ['C2H2',", "1) / (n_targets + 1) expected_fdr_mono = pd.Series( [0 /", "'tm', 'dm'], ) msm_df = pd.DataFrame( [ ['H2O', '+H', 0.85],", "product(['', *chem_mods], ['', *neutral_losses], target_adducts) ] decoy_sample_size = 5 fdr_config", "'+H', 0.75], ['C3', '+H', 0.5], ['C4', '+H', 0.25], ['C1', '+Cu',", "ions))) def test_chem_mods_and_neutral_losses(): formulas = ['H2O', 'C5H2OH'] chem_mods = ['-H+C']", "= n_decoys / n_targets expected_fdr_ros = (n_decoys + 1) /", "target_adducts=['+H'], analysis_version=analysis_version, ) fdr.fdr_levels = [0.2, 0.8] fdr.td_df = pd.DataFrame(", "< len(ions) <= max_count target_ions = list(product(formulas, target_modifiers)) assert set(target_ions).issubset(set(map(tuple,", "set(target_ions).issubset(set(map(tuple, ions))) def test_run_fdr_ranking(): target_scores = pd.Series([1.0, 0.9, 0.8, 0.7,", "chem_mods=[], neutral_losses=[], target_adducts=['+H', '+K', '[M]+'], analysis_version=1, ) exp_target_decoy_df = pd.DataFrame(", "import pandas as pd from pandas.util.testing import assert_frame_equal from sm.engine.annotation.fdr", "], columns=['formula', 'modifier', 'msm'], ) exp_sf_df = pd.DataFrame( [ ['H2O',", "def test_run_fdr_ranking(): target_scores = pd.Series([1.0, 0.9, 0.8, 0.7, 0.6, 0.5,", "'+He'), ('H2O', '+K', '+Li'), ('H2O', '', '+He'), ('H2O', '', '+Li'),", "run_fdr_ranking from sm.engine.formula_parser import format_modifiers FDR_CONFIG = {'decoy_sample_size': 2} @patch('sm.engine.annotation.fdr.DECOY_ADDUCTS',", "7, 8, 9, 10, 11]) n_decoys = pd.Series([0, 0, 1,", "len(formulas) * len(target_modifiers) * (1 + decoy_sample_size) assert min_count <", "format_modifiers(cm, nl, ta) for cm, nl, ta in 
product(['', *chem_mods],", "4 / 11, 4 / 11] ) fdr = run_fdr_ranking(target_scores,", "expected_fdr_ros = (n_decoys + 1) / (n_targets + 1) expected_fdr_mono", "* len(target_adducts) * decoy_sample_size + len(formulas) * len(target_adducts) ) target_ions", "3, 4, 5, 6, 7, 8, 9, 10, 11]) n_decoys", "'modifier', 'msm'], ) exp_sf_df = pd.DataFrame( [ ['C1', '+H', 1.0,", "<= max_count target_ions = list(product(formulas, target_modifiers)) assert set(target_ions).issubset(set(map(tuple, ions))) def", "n_targets = pd.Series([1, 2, 3, 4, 5, 6, 7, 8,", "pd.DataFrame( [['H2O', '+H', '+Cu'], ['H2O', '+H', '+Co'], ['C2H2', '+H', '+Ag'],", "['+H', '+Na', '[M]+'] target_modifiers = [ format_modifiers(cm, nl, ta) for", "['-H+C'] neutral_losses = ['-O', '-C'] target_adducts = ['+H', '+Na', '[M]+']", "'msm', 'fdr'], ) assert_frame_equal(fdr.estimate_fdr(msm_df, None), exp_sf_df) def test_ions(): formulas =", "pd from pandas.util.testing import assert_frame_equal from sm.engine.annotation.fdr import FDR, run_fdr_ranking", "fdr.decoy_adducts_selection(target_formulas=['H2O', 'C5H2OH']) ions = fdr.ion_tuples() assert type(ions) == list #", "9, 4 / 11, 4 / 11] ) fdr =", "from pandas.util.testing import assert_frame_equal from sm.engine.annotation.fdr import FDR, run_fdr_ranking from", "['C3', '+H', 0.5], ['C4', '+H', 0.25], ['C1', '+Cu', 0.75], ['C2',", "chem_mods = ['-H+C'] neutral_losses = ['-O', '-C'] target_adducts = ['+H',", "= pd.Series([0, 0, 1, 1, 1, 2, 2, 2, 3,", "fdr.td_df.sort_values(by=['formula', 'tm', 'dm']).reset_index(drop=True), exp_target_decoy_df.sort_values(by=['formula', 'tm', 'dm']).reset_index(drop=True), ) @pytest.mark.parametrize('analysis_version,expected_fdrs', [(1, [0.2,", "['C4', '+Co', 0.1], ], columns=['formula', 'modifier', 'msm'], ) exp_sf_df =", "test_fdr_decoy_adduct_selection_saves_corr(): fdr = FDR( fdr_config=FDR_CONFIG, chem_mods=[], neutral_losses=[], target_adducts=['+H', '+K', '[M]+'],", "/ 3])]) def test_estimate_fdr_returns_correct_df(analysis_version, expected_fdrs): fdr = FDR( fdr_config=FDR_CONFIG, chem_mods=[],", "0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0]) decoy_scores", ") exp_target_decoy_df = pd.DataFrame( [ ('H2O', '+H', '+He'), ('H2O', '+H',", "columns=['formula', 'modifier', 'msm', 'fdr'], ) assert_frame_equal(fdr.estimate_fdr(msm_df, None), exp_sf_df) def test_ions():", "('H2O', '+K', '+He'), ('H2O', '+K', '+Li'), ('H2O', '', '+He'), ('H2O',", "0.5], ['C2H2', '+Ag', 0.75], ['C2H2', '+Ar', 0.0], ], columns=['formula', 'modifier',", "exp_sf_df) def test_ions(): formulas = ['H2O', 'C5H2OH'] target_adducts = ['+H',", "unittest.mock import patch import pytest import numpy as np import", "neutral_losses=[], target_adducts=target_adducts, analysis_version=1, ) fdr.decoy_adducts_selection(target_formulas=['H2O', 'C5H2OH']) ions = fdr.ion_tuples() assert", "as np import pandas as pd from pandas.util.testing import assert_frame_equal", "target_adducts=['+H', '+K', '[M]+'], analysis_version=1, ) exp_target_decoy_df = pd.DataFrame( [ ('H2O',", "target_ions = [(formula, adduct) for formula, adduct in product(formulas, target_adducts)]", "4, 2 / 3])]) def test_estimate_fdr_returns_correct_df(analysis_version, expected_fdrs): fdr = FDR(", "5, 1 / 5, 1 / 5, 2 / 8,", ") exp_sf_df = pd.DataFrame( [ ['C1', '+H', 1.0, 0.4], ['C2',", "len(target_modifiers) max_count = len(formulas) * len(target_modifiers) * (1 + decoy_sample_size)", "11]) n_decoys = pd.Series([0, 0, 1, 1, 1, 2, 2,", "import assert_frame_equal from sm.engine.annotation.fdr import FDR, run_fdr_ranking from 
sm.engine.formula_parser import", "0.75], ['C2H2', '+Ar', 0.0], ], columns=['formula', 'modifier', 'msm'], ) exp_sf_df", "1} fdr = FDR( fdr_config=fdr_config, chem_mods=[], neutral_losses=[], target_adducts=['+H'], analysis_version=1, )", "= pd.DataFrame( [ ['C1', '+H', 1.0, 0.4], ['C2', '+H', 0.75,", "1) expected_fdr_mono = pd.Series( [0 / 2, 0 / 2,", "neutral_losses=[], target_adducts=['+H', '+K', '[M]+'], analysis_version=1, ) exp_target_decoy_df = pd.DataFrame( [", "'', '+Li'), ], columns=['formula', 'tm', 'dm'], ) fdr.decoy_adducts_selection(target_formulas=['H2O']) assert_frame_equal( fdr.td_df.sort_values(by=['formula',", "for cm, nl, ta in product(['', *chem_mods], ['', *neutral_losses], target_adducts)", "1.0, 0.4], ['C2', '+H', 0.75, 0.4], ['C3', '+H', 0.5, 0.4],", "run_fdr_ranking(target_scores, decoy_scores, 1, False, False) fdr_ros = run_fdr_ranking(target_scores, decoy_scores, 1,", "max_count = len(formulas) * len(target_modifiers) * (1 + decoy_sample_size) assert", "cm, nl, ta in product(['', *chem_mods], ['', *neutral_losses], target_adducts) ]", "def test_estimate_fdr_digitize_works(): fdr_config = {**FDR_CONFIG, 'decoy_sample_size': 1} fdr = FDR(", "1 / 5, 2 / 8, 2 / 8, 2", "5 fdr_config = {**FDR_CONFIG, 'decoy_sample_size': decoy_sample_size} fdr = FDR( fdr_config=fdr_config,", "= len(formulas) * len(target_modifiers) max_count = len(formulas) * len(target_modifiers) *", "2, 3, 4, 5, 6, 7, 8, 9, 10, 11])", "= 5 fdr_config = {**FDR_CONFIG, 'decoy_sample_size': decoy_sample_size} fdr = FDR(", "pd.DataFrame( [ ('H2O', '+H', '+He'), ('H2O', '+H', '+Li'), ('H2O', '+K',", "(formula, decoy_modifier) pair assert ( len(formulas) * decoy_sample_size + len(formulas)", "fdr = FDR( fdr_config=fdr_config, chem_mods=[], neutral_losses=[], target_adducts=['+H'], analysis_version=1, ) fdr.fdr_levels", "['H2O', '+H', 0.85], ['C2H2', '+H', 0.5], ['H2O', '+Cu', 0.5], ['H2O',", "['', *neutral_losses], target_adducts) ] decoy_sample_size = 5 fdr_config = {**FDR_CONFIG,", "set(target_ions).issubset(set(map(tuple, ions))) def test_chem_mods_and_neutral_losses(): formulas = ['H2O', 'C5H2OH'] chem_mods =", "/ 5, 2 / 8, 2 / 8, 2 /", "('H2O', '+H', '+Li'), ('H2O', '+K', '+He'), ('H2O', '+K', '+Li'), ('H2O',", "{**FDR_CONFIG, 'decoy_sample_size': 1} fdr = FDR( fdr_config=fdr_config, chem_mods=[], neutral_losses=[], target_adducts=['+H'],", "columns=['formula', 'modifier', 'msm'], ) exp_sf_df = pd.DataFrame( [ ['C1', '+H',", "'+Co', 0.5], ['C2H2', '+Ag', 0.75], ['C2H2', '+Ar', 0.0], ], columns=['formula',", "['C1', '+Cu', 0.75], ['C2', '+Ag', 0.3], ['C3', '+Cl', 0.25], ['C4',", "/ 2, 1 / 5, 1 / 5, 1 /", "same (formula, decoy_modifier) pair min_count = len(formulas) * len(target_modifiers) max_count", "len(target_adducts) * decoy_sample_size + len(formulas) * len(target_adducts) ) target_ions =", "5, 6, 7, 8, 9, 10, 11]) n_decoys = pd.Series([0,", "columns=['formula', 'modifier', 'msm'], ).assign(fdr=expected_fdrs) assert_frame_equal(fdr.estimate_fdr(msm_df, None), exp_sf_df) def test_estimate_fdr_digitize_works(): fdr_config", "pd.Series([0.8, 0.55, 0.2, 0.1]) n_targets = pd.Series([1, 2, 3, 4,", ") assert_frame_equal(fdr.estimate_fdr(msm_df, None), exp_sf_df) def test_ions(): formulas = ['H2O', 'C5H2OH']", "test_run_fdr_ranking(): target_scores = pd.Series([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4,", "pytest import numpy as np import pandas as pd from", "None), exp_sf_df) def test_ions(): formulas = ['H2O', 'C5H2OH'] target_adducts =", "1 / 5, 1 / 5, 1 / 5, 2" ]
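
# --- Illustrative sketch (not part of the test module above) ---
# The expected_fdr values asserted in test_run_fdr_ranking follow the usual
# target-decoy counting scheme: for each target score, count how many targets
# and decoys score at least as high. `naive_fdr_ranking` is a hypothetical
# helper written here only for illustration; it is NOT the sm.engine
# implementation, and it reuses the numpy/pandas imports from above.
def naive_fdr_ranking(target_scores, decoy_scores, rule_of_succession=False):
    targets = np.sort(np.asarray(target_scores))[::-1]           # rank targets by descending score
    decoys = np.asarray(decoy_scores)
    n_targets = np.arange(1, len(targets) + 1)                   # targets at or above each score
    n_decoys = np.array([(decoys >= s).sum() for s in targets])  # decoys at or above each score
    if rule_of_succession:
        # Add-one smoothing, matching the expected_fdr_ros series above.
        return pd.Series((n_decoys + 1) / (n_targets + 1))
    return pd.Series(n_decoys / n_targets)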
[ "learning, 2018 \"\"\" ub = 1 + ub lb =", "= [] trends = [] for idx in range(len(price)-lookahead+1): arr_window", "trading strategy and not a simple performance proxy. The rationale", "if price[idx+lookahead-1] > price[idx]: coef = (price[idx+lookahead-1]-min(arr_window)) / (max(arr_window)-min(arr_window)) y_t", "s[0] < lb)], lookback-1)[0] price = df[column] p = price.rolling(lookback).apply(end_price,", "Barrier The idea is to consider the full dynamics of", "neural networks, 2017 \"\"\" price = df[column] label = (price.shift(-lookback)", "return label def triple_barrier(df, column='close', ub=0.07, lb=0.03, lookback=20, binary_classification=True): \"\"\"", "\"high\", \"low\", and \"close.\" ub: float It stands for upper", "presented as one of the main procedures to label data", "3. https://quantdare.com/4-simple-ways-to-label-financial-data-for-machine-learning/ 4. De Prado, Advances in financial machine learning,", "s[0] > ub) | (s / s[0] < lb)], lookback-1)[0]", "is presented as one of the main procedures to label", "technical analysis with machine learning techniques, 2016 \"\"\" price =", "price[idx:(idx+lookahead)] if price[idx+lookahead-1] > price[idx]: coef = (price[idx+lookahead-1]-min(arr_window)) / (max(arr_window)-min(arr_window))", "a simple performance proxy. The rationale for this extension is", "trends = [] for idx in range(len(price)-lookahead+1): arr_window = price[idx:(idx+lookahead)]", "it relates to finance, virtually all ML papers label observations", "trends = (OTr >= np.mean(OTr)).astype(int) return pd.Series(OTr, index=price.index), pd.Series(trends, index=price.index)", "finance, virtually all ML papers label observations using the fixed-time", "for this extension is that often money managers implement P&L", ").dropna() label = pd.Series(0, p.index) label.loc[p > ub] = 1", "the main procedures to label data when it comes to", "0.5 elif price[idx+lookahead-1] <= price[idx]: coef = (price[idx+lookahead-1]-min(arr_window)) / (max(arr_window)-min(arr_window))", "* 0.5 OTr.append(y_t) OTr = np.append(OTr, np.zeros(shape=(len(price)-len(OTr)))) trends = (OTr", "Prado, Advances in financial machine learning, 2018 5. Dixon et", "is to consider the full dynamics of a trading strategy", "when gains are sufficient or opt out to stop their", "import pandas as pd def fixed_time_horizon(df, column='close', lookback=20): \"\"\" Fixed-time", "machine learning. Parameters ---------- df: pd.DataFrame column: str Choose from", "to finance, virtually all ML papers label observations using the", "0.03 is a 3% stop loss. lookback: str Maximum holding", "5. Dixon et al., Classification-based financial markets prediction using deep", "range(len(price)-lookahead+1): arr_window = price[idx:(idx+lookahead)] if price[idx+lookahead-1] > price[idx]: coef =", "(max(arr_window)-min(arr_window)) y_t = coef * 0.5 + 0.5 elif price[idx+lookahead-1]", "are sufficient or opt out to stop their losses. Upon", "https://arxiv.org/pdf/1603.08604.pdf 3. https://quantdare.com/4-simple-ways-to-label-financial-data-for-machine-learning/ 4. De Prado, Advances in financial machine", "lb=0.03, lookback=20, binary_classification=True): \"\"\" Triple Barrier The idea is to", "<reponame>penguinwang96825/Umigame import math import numpy as np import pandas as", "look ahead. References ---------- 1. https://translateyar.ir/wp-content/uploads/2020/05/1-s2.0-S2405918815300179-main-1.pdf 2. Dash and Dash,", "integrating technical analysis with machine learning techniques. 
Parameters ---------- df:", "\"high\", \"low\", and \"close.\" lookahead: str The number of days", "to stop their losses. Upon inception of the strategy, three", "k in enumerate(t)], index=t.index ).dropna() label = pd.Series(0, p.index) label.loc[p", "OTr = [] trends = [] for idx in range(len(price)-lookahead+1):", "(price[idx+lookahead-1]-min(arr_window)) / (max(arr_window)-min(arr_window)) y_t = coef * 0.5 OTr.append(y_t) OTr", "strategy, three barriers are fixed (De Prado, 2018). Parameters ----------", "et al., Classification-based financial markets prediction using deep neural networks,", "column: str Choose from \"open\", \"high\", \"low\", and \"close.\" ub:", "consider the full dynamics of a trading strategy and not", "math import numpy as np import pandas as pd def", "References ---------- 1. https://mlfinlab.readthedocs.io/en/latest/labeling/labeling_fixed_time_horizon.html 2. https://arxiv.org/pdf/1603.08604.pdf 3. https://quantdare.com/4-simple-ways-to-label-financial-data-for-machine-learning/ 4. De", "Dash, A hybrid stock trading framework integrating technical analysis with", "return np.append(r[(s / s[0] > ub) | (s / s[0]", "one of the main procedures to label data when it", "the fixed-time horizon method. Fixed-time horizon is presented as one", "= price.rolling(lookback).apply(end_price, raw=True).shift(-lookback+1) t = price.rolling(lookback).apply(end_time, raw=True).shift(-lookback+1) t = pd.Series(", "loss. lookback: str Maximum holding time. References ---------- 1. https://www.finlab.tw/generate-labels-stop-loss-stop-profit/", "look ahead. References ---------- 1. https://mlfinlab.readthedocs.io/en/latest/labeling/labeling_fixed_time_horizon.html 2. https://arxiv.org/pdf/1603.08604.pdf 3. https://quantdare.com/4-simple-ways-to-label-financial-data-for-machine-learning/", "1. https://mlfinlab.readthedocs.io/en/latest/labeling/labeling_fixed_time_horizon.html 2. https://arxiv.org/pdf/1603.08604.pdf 3. https://quantdare.com/4-simple-ways-to-label-financial-data-for-machine-learning/ 4. De Prado, Advances", "Trading Signal A hybrid stock trading framework integrating technical analysis", "machine learning techniques. Parameters ---------- df: pd.DataFrame column: str Choose", "pd.Series(label, index=price.index) def get_continuous_trading_signals(df, column='close', lookahead=5): \"\"\" Continuous Trading Signal", "= (price[idx+lookahead-1]-min(arr_window)) / (max(arr_window)-min(arr_window)) y_t = coef * 0.5 +", "of a trading strategy and not a simple performance proxy.", "1).astype(int) return label def triple_barrier(df, column='close', ub=0.07, lb=0.03, lookback=20, binary_classification=True):", "pd def fixed_time_horizon(df, column='close', lookback=20): \"\"\" Fixed-time Horizon As it", "str The number of days to look ahead. References ----------", "and Dash, A hybrid stock trading framework integrating technical analysis", "1 label.loc[p < lb] = -1 if binary_classification: label =", "upper bound, e.g. 0.07 is a 7% profit taking. lb:", "= (price[idx+lookahead-1]-min(arr_window)) / (max(arr_window)-min(arr_window)) y_t = coef * 0.5 OTr.append(y_t)", "strategy and not a simple performance proxy. The rationale for", "The idea is to consider the full dynamics of a", "elif price[idx+lookahead-1] <= price[idx]: coef = (price[idx+lookahead-1]-min(arr_window)) / (max(arr_window)-min(arr_window)) y_t", "to processing financial time series for machine learning. Parameters ----------", "a 7% profit taking. 
lb: float It stands for lower", "that cash in when gains are sufficient or opt out", "using the fixed-time horizon method. Fixed-time horizon is presented as", "math.isnan(k+i) else np.datetime64('NaT') for i, k in enumerate(t)], index=t.index ).dropna()", "lookback=20, binary_classification=True): \"\"\" Triple Barrier The idea is to consider", "np import pandas as pd def fixed_time_horizon(df, column='close', lookback=20): \"\"\"", "Choose from \"open\", \"high\", \"low\", and \"close.\" ub: float It", "= 1 label.loc[p < lb] = -1 if binary_classification: label", "\"\"\" Fixed-time Horizon As it relates to finance, virtually all", "Continuous Trading Signal A hybrid stock trading framework integrating technical", "De Prado, Advances in financial machine learning, 2018 5. Dixon", "= [] for idx in range(len(price)-lookahead+1): arr_window = price[idx:(idx+lookahead)] if", "Advances in financial machine learning, 2018 5. Dixon et al.,", "lb: float It stands for lower bound, e.g. 0.03 is", "As it relates to finance, virtually all ML papers label", "p = price.rolling(lookback).apply(end_price, raw=True).shift(-lookback+1) t = price.rolling(lookback).apply(end_time, raw=True).shift(-lookback+1) t =", "label = pd.Series(0, p.index) label.loc[p > ub] = 1 label.loc[p", "days to look ahead. References ---------- 1. https://mlfinlab.readthedocs.io/en/latest/labeling/labeling_fixed_time_horizon.html 2. https://arxiv.org/pdf/1603.08604.pdf", "> ub) | (s / s[0] < lb)], lookback-1)[0] price", "ahead. References ---------- 1. https://mlfinlab.readthedocs.io/en/latest/labeling/labeling_fixed_time_horizon.html 2. https://arxiv.org/pdf/1603.08604.pdf 3. https://quantdare.com/4-simple-ways-to-label-financial-data-for-machine-learning/ 4.", "fixed-time horizon method. Fixed-time horizon is presented as one of", "e.g. 0.07 is a 7% profit taking. lb: float It", "or opt out to stop their losses. Upon inception of", "is a 3% stop loss. lookback: str Maximum holding time.", "data when it comes to processing financial time series for", "\"\"\" price = df[column] label = (price.shift(-lookback) / price >", "https://quantdare.com/4-simple-ways-to-label-financial-data-for-machine-learning/ 4. De Prado, Advances in financial machine learning, 2018", "number of days to look ahead. References ---------- 1. https://translateyar.ir/wp-content/uploads/2020/05/1-s2.0-S2405918815300179-main-1.pdf", "price = df[column] label = (price.shift(-lookback) / price > 1).astype(int)", "/ (max(arr_window)-min(arr_window)) y_t = coef * 0.5 + 0.5 elif", "to look ahead. References ---------- 1. https://translateyar.ir/wp-content/uploads/2020/05/1-s2.0-S2405918815300179-main-1.pdf 2. Dash and", "df[column] p = price.rolling(lookback).apply(end_price, raw=True).shift(-lookback+1) t = price.rolling(lookback).apply(end_time, raw=True).shift(-lookback+1) t", "horizon is presented as one of the main procedures to", "from \"open\", \"high\", \"low\", and \"close.\" ub: float It stands", "number of days to look ahead. References ---------- 1. https://mlfinlab.readthedocs.io/en/latest/labeling/labeling_fixed_time_horizon.html", "\"open\", \"high\", \"low\", and \"close.\" lookahead: str The number of", "= np.where(label == 1, 1, 0) return pd.Series(label, index=price.index) def", "It stands for lower bound, e.g. 0.03 is a 3%", "i, k in enumerate(t)], index=t.index ).dropna() label = pd.Series(0, p.index)", "The rationale for this extension is that often money managers", "| (s / s[0] < lb)], lookback-1)[0] price = df[column]", "ahead. 
References ---------- 1. https://translateyar.ir/wp-content/uploads/2020/05/1-s2.0-S2405918815300179-main-1.pdf 2. Dash and Dash, A", "money managers implement P&L triggers that cash in when gains", "(price[idx+lookahead-1]-min(arr_window)) / (max(arr_window)-min(arr_window)) y_t = coef * 0.5 + 0.5", "idea is to consider the full dynamics of a trading", "2018). Parameters ---------- df: pd.DataFrame column: str Choose from \"open\",", "return np.append(s[(s / s[0] > ub) | (s / s[0]", "t = price.rolling(lookback).apply(end_time, raw=True).shift(-lookback+1) t = pd.Series( [t.index[int(k+i)] if not", "extension is that often money managers implement P&L triggers that", "of the main procedures to label data when it comes", "Horizon As it relates to finance, virtually all ML papers", "label observations using the fixed-time horizon method. Fixed-time horizon is", "< lb] = -1 if binary_classification: label = np.where(label ==", "as pd def fixed_time_horizon(df, column='close', lookback=20): \"\"\" Fixed-time Horizon As", "ub] = 1 label.loc[p < lb] = -1 if binary_classification:", "s[-1])[0]/s[0] r = np.array(range(lookback)) def end_time(s): return np.append(r[(s / s[0]", "of days to look ahead. References ---------- 1. https://translateyar.ir/wp-content/uploads/2020/05/1-s2.0-S2405918815300179-main-1.pdf 2.", "prediction using deep neural networks, 2017 \"\"\" price = df[column]", "end_price(s): return np.append(s[(s / s[0] > ub) | (s /", "time. References ---------- 1. https://www.finlab.tw/generate-labels-stop-loss-stop-profit/ 2. http://www.mlfactor.com/Data.html#the-triple-barrier-method 3. https://chrisconlan.com/calculating-triple-barrier-labels-from-advances-in-financial-machine-learning/ 4.", "fixed_time_horizon(df, column='close', lookback=20): \"\"\" Fixed-time Horizon As it relates to", "r = np.array(range(lookback)) def end_time(s): return np.append(r[(s / s[0] >", "from \"open\", \"high\", \"low\", and \"close.\" lookahead: str The number", "when it comes to processing financial time series for machine", "index=t.index ).dropna() label = pd.Series(0, p.index) label.loc[p > ub] =", "ub = 1 + ub lb = 1- lb def", "Prado, 2018). Parameters ---------- df: pd.DataFrame column: str Choose from", "pd.DataFrame column: str Choose from \"open\", \"high\", \"low\", and \"close.\"", "performance proxy. The rationale for this extension is that often", "else np.datetime64('NaT') for i, k in enumerate(t)], index=t.index ).dropna() label", "5. De Prado, Advances in financial machine learning, 2018 \"\"\"", "https://www.finlab.tw/generate-labels-stop-loss-stop-profit/ 2. http://www.mlfactor.com/Data.html#the-triple-barrier-method 3. https://chrisconlan.com/calculating-triple-barrier-labels-from-advances-in-financial-machine-learning/ 4. https://towardsdatascience.com/financial-machine-learning-part-1-labels-7eeed050f32e 5. De Prado,", "using deep neural networks, 2017 \"\"\" price = df[column] label", "learning techniques. Parameters ---------- df: pd.DataFrame column: str Choose from", "trading framework integrating technical analysis with machine learning techniques. Parameters", "barriers are fixed (De Prado, 2018). 
Parameters ---------- df: pd.DataFrame", "to consider the full dynamics of a trading strategy and", "price.rolling(lookback).apply(end_price, raw=True).shift(-lookback+1) t = price.rolling(lookback).apply(end_time, raw=True).shift(-lookback+1) t = pd.Series( [t.index[int(k+i)]", "/ (max(arr_window)-min(arr_window)) y_t = coef * 0.5 OTr.append(y_t) OTr =", "https://chrisconlan.com/calculating-triple-barrier-labels-from-advances-in-financial-machine-learning/ 4. https://towardsdatascience.com/financial-machine-learning-part-1-labels-7eeed050f32e 5. De Prado, Advances in financial machine", "proxy. The rationale for this extension is that often money", "\"low\", and \"close.\" ub: float It stands for upper bound,", "lb = 1- lb def end_price(s): return np.append(s[(s / s[0]", "= df[column] label = (price.shift(-lookback) / price > 1).astype(int) return", "Dash and Dash, A hybrid stock trading framework integrating technical", "1, 1, 0) return pd.Series(label, index=price.index) def get_continuous_trading_signals(df, column='close', lookahead=5):", "numpy as np import pandas as pd def fixed_time_horizon(df, column='close',", "lb] = -1 if binary_classification: label = np.where(label == 1,", "lookback: str Maximum holding time. References ---------- 1. https://www.finlab.tw/generate-labels-stop-loss-stop-profit/ 2.", "Fixed-time horizon is presented as one of the main procedures", "0.5 + 0.5 elif price[idx+lookahead-1] <= price[idx]: coef = (price[idx+lookahead-1]-min(arr_window))", "raw=True).shift(-lookback+1) t = pd.Series( [t.index[int(k+i)] if not math.isnan(k+i) else np.datetime64('NaT')", "to look ahead. References ---------- 1. https://mlfinlab.readthedocs.io/en/latest/labeling/labeling_fixed_time_horizon.html 2. https://arxiv.org/pdf/1603.08604.pdf 3.", "< lb)], s[-1])[0]/s[0] r = np.array(range(lookback)) def end_time(s): return np.append(r[(s", "comes to processing financial time series for machine learning. Parameters", "7% profit taking. lb: float It stands for lower bound,", "\"low\", and \"close.\" lookahead: str The number of days to", "3% stop loss. lookback: str Maximum holding time. References ----------", "2016 \"\"\" price = df.data[column] OTr = [] trends =", "\"close.\" lookahead: str The number of days to look ahead.", "def end_price(s): return np.append(s[(s / s[0] > ub) | (s", "financial time series for machine learning. Parameters ---------- df: pd.DataFrame", "4. https://towardsdatascience.com/financial-machine-learning-part-1-labels-7eeed050f32e 5. De Prado, Advances in financial machine learning,", "lookback-1)[0] price = df[column] p = price.rolling(lookback).apply(end_price, raw=True).shift(-lookback+1) t =", "ML papers label observations using the fixed-time horizon method. Fixed-time", "price = df[column] p = price.rolling(lookback).apply(end_price, raw=True).shift(-lookback+1) t = price.rolling(lookback).apply(end_time,", "column='close', ub=0.07, lb=0.03, lookback=20, binary_classification=True): \"\"\" Triple Barrier The idea", "https://towardsdatascience.com/financial-machine-learning-part-1-labels-7eeed050f32e 5. 
De Prado, Advances in financial machine learning, 2018", "/ s[0] < lb)], lookback-1)[0] price = df[column] p =", "networks, 2017 \"\"\" price = df[column] label = (price.shift(-lookback) /", "= np.append(OTr, np.zeros(shape=(len(price)-len(OTr)))) trends = (OTr >= np.mean(OTr)).astype(int) return pd.Series(OTr,", "framework integrating technical analysis with machine learning techniques, 2016 \"\"\"", "in financial machine learning, 2018 \"\"\" ub = 1 +", "lookback=20): \"\"\" Fixed-time Horizon As it relates to finance, virtually", "The number of days to look ahead. References ---------- 1.", "> ub) | (s / s[0] < lb)], s[-1])[0]/s[0] r", "learning techniques, 2016 \"\"\" price = df.data[column] OTr = []", "bound, e.g. 0.03 is a 3% stop loss. lookback: str", "machine learning, 2018 \"\"\" ub = 1 + ub lb", "with machine learning techniques. Parameters ---------- df: pd.DataFrame column: str", "often money managers implement P&L triggers that cash in when", "/ s[0] < lb)], s[-1])[0]/s[0] r = np.array(range(lookback)) def end_time(s):", "implement P&L triggers that cash in when gains are sufficient", "(De Prado, 2018). Parameters ---------- df: pd.DataFrame column: str Choose", "(price.shift(-lookback) / price > 1).astype(int) return label def triple_barrier(df, column='close',", "al., Classification-based financial markets prediction using deep neural networks, 2017", "for lower bound, e.g. 0.03 is a 3% stop loss.", "= pd.Series( [t.index[int(k+i)] if not math.isnan(k+i) else np.datetime64('NaT') for i,", "method. Fixed-time horizon is presented as one of the main", "lower bound, e.g. 0.03 is a 3% stop loss. lookback:", "integrating technical analysis with machine learning techniques, 2016 \"\"\" price", "/ s[0] > ub) | (s / s[0] < lb)],", "is that often money managers implement P&L triggers that cash", "float It stands for lower bound, e.g. 0.03 is a", "as one of the main procedures to label data when", "Prado, Advances in financial machine learning, 2018 \"\"\" ub =", "virtually all ML papers label observations using the fixed-time horizon", "techniques, 2016 \"\"\" price = df.data[column] OTr = [] trends", "not a simple performance proxy. The rationale for this extension", "and not a simple performance proxy. The rationale for this", "import math import numpy as np import pandas as pd", "techniques. Parameters ---------- df: pd.DataFrame column: str Choose from \"open\",", "papers label observations using the fixed-time horizon method. Fixed-time horizon", "analysis with machine learning techniques, 2016 \"\"\" price = df.data[column]", "a 3% stop loss. lookback: str Maximum holding time. References", "holding time. References ---------- 1. https://www.finlab.tw/generate-labels-stop-loss-stop-profit/ 2. http://www.mlfactor.com/Data.html#the-triple-barrier-method 3. https://chrisconlan.com/calculating-triple-barrier-labels-from-advances-in-financial-machine-learning/", "Fixed-time Horizon As it relates to finance, virtually all ML", "rationale for this extension is that often money managers implement", "stop their losses. Upon inception of the strategy, three barriers", "[t.index[int(k+i)] if not math.isnan(k+i) else np.datetime64('NaT') for i, k in", "(max(arr_window)-min(arr_window)) y_t = coef * 0.5 OTr.append(y_t) OTr = np.append(OTr,", "4. 
De Prado, Advances in financial machine learning, 2018 5.", "not math.isnan(k+i) else np.datetime64('NaT') for i, k in enumerate(t)], index=t.index", "\"\"\" Triple Barrier The idea is to consider the full", "column: str Choose from \"open\", \"high\", \"low\", and \"close.\" lookahead:", "triggers that cash in when gains are sufficient or opt", "fixed (De Prado, 2018). Parameters ---------- df: pd.DataFrame column: str", "[] trends = [] for idx in range(len(price)-lookahead+1): arr_window =", "full dynamics of a trading strategy and not a simple", "def get_continuous_trading_signals(df, column='close', lookahead=5): \"\"\" Continuous Trading Signal A hybrid", "= df.data[column] OTr = [] trends = [] for idx", "2017 \"\"\" price = df[column] label = (price.shift(-lookback) / price", "with machine learning techniques, 2016 \"\"\" price = df.data[column] OTr", "= df[column] p = price.rolling(lookback).apply(end_price, raw=True).shift(-lookback+1) t = price.rolling(lookback).apply(end_time, raw=True).shift(-lookback+1)", "if binary_classification: label = np.where(label == 1, 1, 0) return", "out to stop their losses. Upon inception of the strategy,", "OTr = np.append(OTr, np.zeros(shape=(len(price)-len(OTr)))) trends = (OTr >= np.mean(OTr)).astype(int) return", "https://mlfinlab.readthedocs.io/en/latest/labeling/labeling_fixed_time_horizon.html 2. https://arxiv.org/pdf/1603.08604.pdf 3. https://quantdare.com/4-simple-ways-to-label-financial-data-for-machine-learning/ 4. De Prado, Advances in", "gains are sufficient or opt out to stop their losses.", "stands for lower bound, e.g. 0.03 is a 3% stop", "np.append(OTr, np.zeros(shape=(len(price)-len(OTr)))) trends = (OTr >= np.mean(OTr)).astype(int) return pd.Series(OTr, index=price.index),", "= (price.shift(-lookback) / price > 1).astype(int) return label def triple_barrier(df,", "arr_window = price[idx:(idx+lookahead)] if price[idx+lookahead-1] > price[idx]: coef = (price[idx+lookahead-1]-min(arr_window))", "in range(len(price)-lookahead+1): arr_window = price[idx:(idx+lookahead)] if price[idx+lookahead-1] > price[idx]: coef", "procedures to label data when it comes to processing financial", "https://translateyar.ir/wp-content/uploads/2020/05/1-s2.0-S2405918815300179-main-1.pdf 2. Dash and Dash, A hybrid stock trading framework", "\"\"\" price = df.data[column] OTr = [] trends = []", "and \"close.\" ub: float It stands for upper bound, e.g.", "s[0] > ub) | (s / s[0] < lb)], s[-1])[0]/s[0]", "lookahead=5): \"\"\" Continuous Trading Signal A hybrid stock trading framework", "2018 \"\"\" ub = 1 + ub lb = 1-", "Advances in financial machine learning, 2018 \"\"\" ub = 1", "label.loc[p > ub] = 1 label.loc[p < lb] = -1", "def fixed_time_horizon(df, column='close', lookback=20): \"\"\" Fixed-time Horizon As it relates", "main procedures to label data when it comes to processing", "It stands for upper bound, e.g. 0.07 is a 7%", "are fixed (De Prado, 2018). Parameters ---------- df: pd.DataFrame column:", "> price[idx]: coef = (price[idx+lookahead-1]-min(arr_window)) / (max(arr_window)-min(arr_window)) y_t = coef", "and \"close.\" lookahead: str The number of days to look", "inception of the strategy, three barriers are fixed (De Prado,", "pd.Series( [t.index[int(k+i)] if not math.isnan(k+i) else np.datetime64('NaT') for i, k", "---------- 1. https://translateyar.ir/wp-content/uploads/2020/05/1-s2.0-S2405918815300179-main-1.pdf 2. 
Dash and Dash, A hybrid stock", "s[0] < lb)], s[-1])[0]/s[0] r = np.array(range(lookback)) def end_time(s): return", "time series for machine learning. Parameters ---------- df: pd.DataFrame column:", "financial machine learning, 2018 \"\"\" ub = 1 + ub", "| (s / s[0] < lb)], s[-1])[0]/s[0] r = np.array(range(lookback))", "markets prediction using deep neural networks, 2017 \"\"\" price =", "* 0.5 + 0.5 elif price[idx+lookahead-1] <= price[idx]: coef =", "ub) | (s / s[0] < lb)], lookback-1)[0] price =", "for i, k in enumerate(t)], index=t.index ).dropna() label = pd.Series(0,", "np.where(label == 1, 1, 0) return pd.Series(label, index=price.index) def get_continuous_trading_signals(df,", "lb def end_price(s): return np.append(s[(s / s[0] > ub) |", "2. https://arxiv.org/pdf/1603.08604.pdf 3. https://quantdare.com/4-simple-ways-to-label-financial-data-for-machine-learning/ 4. De Prado, Advances in financial", "p.index) label.loc[p > ub] = 1 label.loc[p < lb] =", "df.data[column] OTr = [] trends = [] for idx in", "def end_time(s): return np.append(r[(s / s[0] > ub) | (s", "1. https://www.finlab.tw/generate-labels-stop-loss-stop-profit/ 2. http://www.mlfactor.com/Data.html#the-triple-barrier-method 3. https://chrisconlan.com/calculating-triple-barrier-labels-from-advances-in-financial-machine-learning/ 4. https://towardsdatascience.com/financial-machine-learning-part-1-labels-7eeed050f32e 5. De", "end_time(s): return np.append(r[(s / s[0] > ub) | (s /", "def triple_barrier(df, column='close', ub=0.07, lb=0.03, lookback=20, binary_classification=True): \"\"\" Triple Barrier", "2. Dash and Dash, A hybrid stock trading framework integrating", "ub) | (s / s[0] < lb)], s[-1])[0]/s[0] r =", "a trading strategy and not a simple performance proxy. The", "---------- 1. https://www.finlab.tw/generate-labels-stop-loss-stop-profit/ 2. http://www.mlfactor.com/Data.html#the-triple-barrier-method 3. https://chrisconlan.com/calculating-triple-barrier-labels-from-advances-in-financial-machine-learning/ 4. https://towardsdatascience.com/financial-machine-learning-part-1-labels-7eeed050f32e 5.", "1. https://translateyar.ir/wp-content/uploads/2020/05/1-s2.0-S2405918815300179-main-1.pdf 2. Dash and Dash, A hybrid stock trading", "price[idx+lookahead-1] <= price[idx]: coef = (price[idx+lookahead-1]-min(arr_window)) / (max(arr_window)-min(arr_window)) y_t =", "= price[idx:(idx+lookahead)] if price[idx+lookahead-1] > price[idx]: coef = (price[idx+lookahead-1]-min(arr_window)) /", "profit taking. lb: float It stands for lower bound, e.g.", "financial markets prediction using deep neural networks, 2017 \"\"\" price", "np.append(s[(s / s[0] > ub) | (s / s[0] <", "-1 if binary_classification: label = np.where(label == 1, 1, 0)", "import numpy as np import pandas as pd def fixed_time_horizon(df,", "= np.array(range(lookback)) def end_time(s): return np.append(r[(s / s[0] > ub)", "dynamics of a trading strategy and not a simple performance", "taking. lb: float It stands for lower bound, e.g. 0.03", "http://www.mlfactor.com/Data.html#the-triple-barrier-method 3. https://chrisconlan.com/calculating-triple-barrier-labels-from-advances-in-financial-machine-learning/ 4. https://towardsdatascience.com/financial-machine-learning-part-1-labels-7eeed050f32e 5. 
De Prado, Advances in", "pd.Series(0, p.index) label.loc[p > ub] = 1 label.loc[p < lb]", "/ price > 1).astype(int) return label def triple_barrier(df, column='close', ub=0.07,", "np.array(range(lookback)) def end_time(s): return np.append(r[(s / s[0] > ub) |", "sufficient or opt out to stop their losses. Upon inception", "coef = (price[idx+lookahead-1]-min(arr_window)) / (max(arr_window)-min(arr_window)) y_t = coef * 0.5", "technical analysis with machine learning techniques. Parameters ---------- df: pd.DataFrame", "machine learning techniques, 2016 \"\"\" price = df.data[column] OTr =", "A hybrid stock trading framework integrating technical analysis with machine", "label def triple_barrier(df, column='close', ub=0.07, lb=0.03, lookback=20, binary_classification=True): \"\"\" Triple", "lb)], lookback-1)[0] price = df[column] p = price.rolling(lookback).apply(end_price, raw=True).shift(-lookback+1) t", "machine learning, 2018 5. Dixon et al., Classification-based financial markets", "> ub] = 1 label.loc[p < lb] = -1 if", "stock trading framework integrating technical analysis with machine learning techniques.", "of the strategy, three barriers are fixed (De Prado, 2018).", "price.rolling(lookback).apply(end_time, raw=True).shift(-lookback+1) t = pd.Series( [t.index[int(k+i)] if not math.isnan(k+i) else", "binary_classification: label = np.where(label == 1, 1, 0) return pd.Series(label,", "\"\"\" ub = 1 + ub lb = 1- lb", "binary_classification=True): \"\"\" Triple Barrier The idea is to consider the", "Dixon et al., Classification-based financial markets prediction using deep neural", "e.g. 0.03 is a 3% stop loss. lookback: str Maximum", "De Prado, Advances in financial machine learning, 2018 \"\"\" ub", "their losses. Upon inception of the strategy, three barriers are", "deep neural networks, 2017 \"\"\" price = df[column] label =", "in enumerate(t)], index=t.index ).dropna() label = pd.Series(0, p.index) label.loc[p >", "1, 0) return pd.Series(label, index=price.index) def get_continuous_trading_signals(df, column='close', lookahead=5): \"\"\"", "df: pd.DataFrame column: str Choose from \"open\", \"high\", \"low\", and", "triple_barrier(df, column='close', ub=0.07, lb=0.03, lookback=20, binary_classification=True): \"\"\" Triple Barrier The", "simple performance proxy. The rationale for this extension is that", "price > 1).astype(int) return label def triple_barrier(df, column='close', ub=0.07, lb=0.03,", "three barriers are fixed (De Prado, 2018). Parameters ---------- df:", "price[idx]: coef = (price[idx+lookahead-1]-min(arr_window)) / (max(arr_window)-min(arr_window)) y_t = coef *", "bound, e.g. 0.07 is a 7% profit taking. lb: float", "0.07 is a 7% profit taking. lb: float It stands", "= coef * 0.5 OTr.append(y_t) OTr = np.append(OTr, np.zeros(shape=(len(price)-len(OTr)))) trends", "for machine learning. Parameters ---------- df: pd.DataFrame column: str Choose", "managers implement P&L triggers that cash in when gains are", "relates to finance, virtually all ML papers label observations using", "= pd.Series(0, p.index) label.loc[p > ub] = 1 label.loc[p <", "financial machine learning, 2018 5. 
Dixon et al., Classification-based financial", "\"\"\" Continuous Trading Signal A hybrid stock trading framework integrating", "<= price[idx]: coef = (price[idx+lookahead-1]-min(arr_window)) / (max(arr_window)-min(arr_window)) y_t = coef", "np.zeros(shape=(len(price)-len(OTr)))) trends = (OTr >= np.mean(OTr)).astype(int) return pd.Series(OTr, index=price.index), pd.Series(trends,", "processing financial time series for machine learning. Parameters ---------- df:", "label = (price.shift(-lookback) / price > 1).astype(int) return label def", "Choose from \"open\", \"high\", \"low\", and \"close.\" lookahead: str The", "to label data when it comes to processing financial time", "> 1).astype(int) return label def triple_barrier(df, column='close', ub=0.07, lb=0.03, lookback=20,", "1- lb def end_price(s): return np.append(s[(s / s[0] > ub)", "== 1, 1, 0) return pd.Series(label, index=price.index) def get_continuous_trading_signals(df, column='close',", "= -1 if binary_classification: label = np.where(label == 1, 1,", "price = df.data[column] OTr = [] trends = [] for", "the full dynamics of a trading strategy and not a", "coef * 0.5 + 0.5 elif price[idx+lookahead-1] <= price[idx]: coef", "it comes to processing financial time series for machine learning.", "opt out to stop their losses. Upon inception of the", "= 1- lb def end_price(s): return np.append(s[(s / s[0] >", "all ML papers label observations using the fixed-time horizon method.", "framework integrating technical analysis with machine learning techniques. Parameters ----------", "horizon method. Fixed-time horizon is presented as one of the", "for idx in range(len(price)-lookahead+1): arr_window = price[idx:(idx+lookahead)] if price[idx+lookahead-1] >", "stock trading framework integrating technical analysis with machine learning techniques,", "ub: float It stands for upper bound, e.g. 0.07 is", "---------- 1. https://mlfinlab.readthedocs.io/en/latest/labeling/labeling_fixed_time_horizon.html 2. https://arxiv.org/pdf/1603.08604.pdf 3. https://quantdare.com/4-simple-ways-to-label-financial-data-for-machine-learning/ 4. De Prado,", "+ 0.5 elif price[idx+lookahead-1] <= price[idx]: coef = (price[idx+lookahead-1]-min(arr_window)) /", "y_t = coef * 0.5 OTr.append(y_t) OTr = np.append(OTr, np.zeros(shape=(len(price)-len(OTr))))", "np.datetime64('NaT') for i, k in enumerate(t)], index=t.index ).dropna() label =", "label = np.where(label == 1, 1, 0) return pd.Series(label, index=price.index)", "y_t = coef * 0.5 + 0.5 elif price[idx+lookahead-1] <=", "(s / s[0] < lb)], lookback-1)[0] price = df[column] p", "days to look ahead. References ---------- 1. https://translateyar.ir/wp-content/uploads/2020/05/1-s2.0-S2405918815300179-main-1.pdf 2. Dash", "learning. Parameters ---------- df: pd.DataFrame column: str Choose from \"open\",", "is a 7% profit taking. lb: float It stands for", "stop loss. lookback: str Maximum holding time. References ---------- 1.", "this extension is that often money managers implement P&L triggers", "ub=0.07, lb=0.03, lookback=20, binary_classification=True): \"\"\" Triple Barrier The idea is", "analysis with machine learning techniques. Parameters ---------- df: pd.DataFrame column:", "if not math.isnan(k+i) else np.datetime64('NaT') for i, k in enumerate(t)],", "that often money managers implement P&L triggers that cash in", "hybrid stock trading framework integrating technical analysis with machine learning", "2. http://www.mlfactor.com/Data.html#the-triple-barrier-method 3. 
https://chrisconlan.com/calculating-triple-barrier-labels-from-advances-in-financial-machine-learning/ 4. https://towardsdatascience.com/financial-machine-learning-part-1-labels-7eeed050f32e 5. De Prado, Advances", "t = pd.Series( [t.index[int(k+i)] if not math.isnan(k+i) else np.datetime64('NaT') for", "P&L triggers that cash in when gains are sufficient or", "label.loc[p < lb] = -1 if binary_classification: label = np.where(label", "pandas as pd def fixed_time_horizon(df, column='close', lookback=20): \"\"\" Fixed-time Horizon", "Triple Barrier The idea is to consider the full dynamics", "in financial machine learning, 2018 5. Dixon et al., Classification-based", "3. https://chrisconlan.com/calculating-triple-barrier-labels-from-advances-in-financial-machine-learning/ 4. https://towardsdatascience.com/financial-machine-learning-part-1-labels-7eeed050f32e 5. De Prado, Advances in financial", "Parameters ---------- df: pd.DataFrame column: str Choose from \"open\", \"high\",", "df[column] label = (price.shift(-lookback) / price > 1).astype(int) return label", "in when gains are sufficient or opt out to stop", "coef * 0.5 OTr.append(y_t) OTr = np.append(OTr, np.zeros(shape=(len(price)-len(OTr)))) trends =", "< lb)], lookback-1)[0] price = df[column] p = price.rolling(lookback).apply(end_price, raw=True).shift(-lookback+1)", "ub lb = 1- lb def end_price(s): return np.append(s[(s /", "lb)], s[-1])[0]/s[0] r = np.array(range(lookback)) def end_time(s): return np.append(r[(s /", "(s / s[0] < lb)], s[-1])[0]/s[0] r = np.array(range(lookback)) def", "Maximum holding time. References ---------- 1. https://www.finlab.tw/generate-labels-stop-loss-stop-profit/ 2. http://www.mlfactor.com/Data.html#the-triple-barrier-method 3.", "float It stands for upper bound, e.g. 0.07 is a", "cash in when gains are sufficient or opt out to", "Signal A hybrid stock trading framework integrating technical analysis with", "losses. Upon inception of the strategy, three barriers are fixed", "index=price.index) def get_continuous_trading_signals(df, column='close', lookahead=5): \"\"\" Continuous Trading Signal A", "idx in range(len(price)-lookahead+1): arr_window = price[idx:(idx+lookahead)] if price[idx+lookahead-1] > price[idx]:", "References ---------- 1. https://www.finlab.tw/generate-labels-stop-loss-stop-profit/ 2. http://www.mlfactor.com/Data.html#the-triple-barrier-method 3. https://chrisconlan.com/calculating-triple-barrier-labels-from-advances-in-financial-machine-learning/ 4. https://towardsdatascience.com/financial-machine-learning-part-1-labels-7eeed050f32e", "np.append(r[(s / s[0] > ub) | (s / s[0] <", "the strategy, three barriers are fixed (De Prado, 2018). Parameters", "lookahead: str The number of days to look ahead. References", "price[idx+lookahead-1] > price[idx]: coef = (price[idx+lookahead-1]-min(arr_window)) / (max(arr_window)-min(arr_window)) y_t =", "0.5 OTr.append(y_t) OTr = np.append(OTr, np.zeros(shape=(len(price)-len(OTr)))) trends = (OTr >=", "label data when it comes to processing financial time series", "observations using the fixed-time horizon method. Fixed-time horizon is presented", "\"open\", \"high\", \"low\", and \"close.\" ub: float It stands for", "= coef * 0.5 + 0.5 elif price[idx+lookahead-1] <= price[idx]:", "OTr.append(y_t) OTr = np.append(OTr, np.zeros(shape=(len(price)-len(OTr)))) trends = (OTr >= np.mean(OTr)).astype(int)", "\"close.\" ub: float It stands for upper bound, e.g. 0.07", "for upper bound, e.g. 0.07 is a 7% profit taking.", "stands for upper bound, e.g. 
0.07 is a 7% profit", "References ---------- 1. https://translateyar.ir/wp-content/uploads/2020/05/1-s2.0-S2405918815300179-main-1.pdf 2. Dash and Dash, A hybrid", "raw=True).shift(-lookback+1) t = price.rolling(lookback).apply(end_time, raw=True).shift(-lookback+1) t = pd.Series( [t.index[int(k+i)] if", "= 1 + ub lb = 1- lb def end_price(s):", "+ ub lb = 1- lb def end_price(s): return np.append(s[(s", "Classification-based financial markets prediction using deep neural networks, 2017 \"\"\"", "str Choose from \"open\", \"high\", \"low\", and \"close.\" ub: float", "column='close', lookahead=5): \"\"\" Continuous Trading Signal A hybrid stock trading", "column='close', lookback=20): \"\"\" Fixed-time Horizon As it relates to finance,", "[] for idx in range(len(price)-lookahead+1): arr_window = price[idx:(idx+lookahead)] if price[idx+lookahead-1]", "series for machine learning. Parameters ---------- df: pd.DataFrame column: str", "---------- df: pd.DataFrame column: str Choose from \"open\", \"high\", \"low\",", "= price.rolling(lookback).apply(end_time, raw=True).shift(-lookback+1) t = pd.Series( [t.index[int(k+i)] if not math.isnan(k+i)", "return pd.Series(label, index=price.index) def get_continuous_trading_signals(df, column='close', lookahead=5): \"\"\" Continuous Trading", "of days to look ahead. References ---------- 1. https://mlfinlab.readthedocs.io/en/latest/labeling/labeling_fixed_time_horizon.html 2.", "2018 5. Dixon et al., Classification-based financial markets prediction using", "str Choose from \"open\", \"high\", \"low\", and \"close.\" lookahead: str", "trading framework integrating technical analysis with machine learning techniques, 2016", "1 + ub lb = 1- lb def end_price(s): return", "as np import pandas as pd def fixed_time_horizon(df, column='close', lookback=20):", "learning, 2018 5. Dixon et al., Classification-based financial markets prediction", "get_continuous_trading_signals(df, column='close', lookahead=5): \"\"\" Continuous Trading Signal A hybrid stock", "enumerate(t)], index=t.index ).dropna() label = pd.Series(0, p.index) label.loc[p > ub]", "str Maximum holding time. References ---------- 1. https://www.finlab.tw/generate-labels-stop-loss-stop-profit/ 2. http://www.mlfactor.com/Data.html#the-triple-barrier-method", "0) return pd.Series(label, index=price.index) def get_continuous_trading_signals(df, column='close', lookahead=5): \"\"\" Continuous", "Upon inception of the strategy, three barriers are fixed (De" ]
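
# Minimal usage sketch (illustrative, not part of the library): assumes daily
# close prices in a DataFrame with a DatetimeIndex, matching the functions'
# default column name 'close'. The random-walk data below is purely synthetic,
# and the integer indexing inside get_continuous_trading_signals relies on
# pandas' positional fallback for integer keys (pandas < 3.0).
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    index = pd.date_range('2021-01-01', periods=250, freq='D')
    demo_df = pd.DataFrame({'close': 100 + rng.standard_normal(250).cumsum()}, index=index)

    fth = fixed_time_horizon(demo_df, column='close', lookback=20)  # 0/1 up-move labels
    tb = triple_barrier(demo_df, column='close', ub=0.07, lb=0.03, lookback=20)
    otr, trend = get_continuous_trading_signals(demo_df, column='close', lookahead=5)
    print(fth.value_counts())
    print(tb.value_counts())
    print(trend.mean())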
[ "None: output_filepath = create_image_cache_filename(input_filepath, *args, **kwargs) if os.path.exists(output_filepath): return output_filepath", "convert(input_filepath, output_filepath=None, cleanup_files=False, mimetype=None, *args, **kwargs): size = kwargs.get('size') file_format", "import OfficeConversionError, UnknownFileFormat from .literals import (DEFAULT_PAGE_NUMBER, DEFAULT_ZOOM_LEVEL, DEFAULT_ROTATION, DEFAULT_FILE_FORMAT)", "fs_cleanup from .exceptions import OfficeConversionError, UnknownFileFormat from .literals import (DEFAULT_PAGE_NUMBER,", "mimetype=None, *args, **kwargs): size = kwargs.get('size') file_format = kwargs.get('file_format', DEFAULT_FILE_FORMAT)", "if output_filepath is None: output_filepath = create_image_cache_filename(input_filepath, *args, **kwargs) if", "create_image_cache_filename(input_filepath, *args, **kwargs): if input_filepath: hash_value = HASH_FUNCTION(u''.join([HASH_FUNCTION(smart_str(input_filepath)), unicode(args), unicode(kwargs)]))", "*args, **kwargs) if os.path.exists(output_filepath): return output_filepath if office_converter: try: office_converter.convert(input_filepath,", "OSError: pass def create_image_cache_filename(input_filepath, *args, **kwargs): if input_filepath: hash_value =", "DEFAULT_PAGE_NUMBER) transformations = kwargs.get('transformations', []) if transformations is None: transformations", "hashlib import logging import os from django.utils.encoding import smart_str from", "= 'application/pdf' else: # Recycle the already detected mimetype mimetype", "if rotation != 0 and rotation != 360: transformations.append( {", "common.utils import fs_cleanup from .exceptions import OfficeConversionError, UnknownFileFormat from .literals", "cleanup_files: fs_cleanup(input_filepath) return output_filepath def get_page_count(input_filepath): logger.debug('office_converter: %s' % office_converter)", "input_filepath = office_converter.output_filepath except OfficeConversionError: raise UnknownFileFormat('office converter exception') return", "if input_filepath: hash_value = HASH_FUNCTION(u''.join([HASH_FUNCTION(smart_str(input_filepath)), unicode(args), unicode(kwargs)])) return os.path.join(TEMPORARY_DIRECTORY, hash_value)", "if office_converter: try: office_converter.convert(input_filepath, mimetype=mimetype) if office_converter.exists: input_filepath = office_converter.output_filepath", "result def get_format_list(): return [(format, FILE_FORMATS.get(format, u'')) for format in", "office_converter.mimetype except OfficeConversionError: raise UnknownFileFormat('office converter exception') if size: transformations.append(", "*args, **kwargs)) except OSError: pass def create_image_cache_filename(input_filepath, *args, **kwargs): if", "= kwargs.get('page', DEFAULT_PAGE_NUMBER) transformations = kwargs.get('transformations', []) if transformations is", "transformations is None: transformations = [] if output_filepath is None:", "if size: transformations.append( { 'transformation': TRANSFORMATION_RESIZE, 'arguments': dict(zip([u'width', u'height'], size.split(DIMENSION_SEPARATOR)))", "return output_filepath def get_page_count(input_filepath): logger.debug('office_converter: %s' % office_converter) if office_converter:", "{ 'transformation': TRANSFORMATION_RESIZE, 'arguments': dict(zip([u'width', u'height'], size.split(DIMENSION_SEPARATOR))) } ) if", "size: transformations.append( { 'transformation': TRANSFORMATION_RESIZE, 'arguments': dict(zip([u'width', u'height'], size.split(DIMENSION_SEPARATOR))) }", "mimetype = 'application/pdf' else: 
# Recycle the already detected mimetype", "= lambda x: hashlib.sha256(x).hexdigest() logger = logging.getLogger(__name__) def cache_cleanup(input_filepath, *args,", "smart_str from common.conf.settings import TEMPORARY_DIRECTORY from common.utils import fs_cleanup from", "input_filepath = office_converter.output_filepath mimetype = 'application/pdf' else: # Recycle the", "OfficeConversionError: raise UnknownFileFormat('office converter exception') return backend.get_page_count(input_filepath) def get_available_transformations_choices(): result", "hashlib.sha256(x).hexdigest() logger = logging.getLogger(__name__) def cache_cleanup(input_filepath, *args, **kwargs): try: os.remove(create_image_cache_filename(input_filepath,", "} ) if rotation != 0 and rotation != 360:", "os.path.exists(output_filepath): return output_filepath if office_converter: try: office_converter.convert(input_filepath, mimetype=mimetype) if office_converter.exists:", "django.utils.encoding import smart_str from common.conf.settings import TEMPORARY_DIRECTORY from common.utils import", "= kwargs.get('transformations', []) if transformations is None: transformations = []", "raise UnknownFileFormat('office converter exception') if size: transformations.append( { 'transformation': TRANSFORMATION_RESIZE,", "UnknownFileFormat from .literals import (DEFAULT_PAGE_NUMBER, DEFAULT_ZOOM_LEVEL, DEFAULT_ROTATION, DEFAULT_FILE_FORMAT) from .literals", "from __future__ import absolute_import import hashlib import logging import os", "def convert(input_filepath, output_filepath=None, cleanup_files=False, mimetype=None, *args, **kwargs): size = kwargs.get('size')", "'transformation': TRANSFORMATION_RESIZE, 'arguments': dict(zip([u'width', u'height'], size.split(DIMENSION_SEPARATOR))) } ) if zoom", "'transformation': TRANSFORMATION_ROTATE, 'arguments': {'degrees': rotation} } ) try: backend.convert_file(input_filepath=input_filepath, output_filepath=output_filepath,", "return os.path.join(TEMPORARY_DIRECTORY, hash_value) else: return None def convert(input_filepath, output_filepath=None, cleanup_files=False,", "def get_page_count(input_filepath): logger.debug('office_converter: %s' % office_converter) if office_converter: try: office_converter.convert(input_filepath)", "and rotation != 360: transformations.append( { 'transformation': TRANSFORMATION_ROTATE, 'arguments': {'degrees':", "from .literals import (TRANSFORMATION_CHOICES, TRANSFORMATION_RESIZE, TRANSFORMATION_ROTATE, TRANSFORMATION_ZOOM, DIMENSION_SEPARATOR, FILE_FORMATS) from", "office_converter.exists: input_filepath = office_converter.output_filepath except OfficeConversionError: raise UnknownFileFormat('office converter exception')", "None: transformations = [] if output_filepath is None: output_filepath =", "TRANSFORMATION_RESIZE, 'arguments': dict(zip([u'width', u'height'], size.split(DIMENSION_SEPARATOR))) } ) if zoom !=", "% office_converter.exists) if office_converter.exists: input_filepath = office_converter.output_filepath except OfficeConversionError: raise", "{'degrees': rotation} } ) try: backend.convert_file(input_filepath=input_filepath, output_filepath=output_filepath, transformations=transformations, page=page, file_format=file_format,", "mimetype = office_converter.mimetype except OfficeConversionError: raise UnknownFileFormat('office converter exception') if", "!= 360: transformations.append( { 'transformation': TRANSFORMATION_ROTATE, 'arguments': {'degrees': rotation} }", "[]) if transformations is None: transformations = [] if output_filepath", "'arguments': 
{'percent': zoom} } ) if rotation != 0 and", "kwargs.get('rotation', DEFAULT_ROTATION) page = kwargs.get('page', DEFAULT_PAGE_NUMBER) transformations = kwargs.get('transformations', [])", "TRANSFORMATION_CHOICES[transformation]['label'])) return result def get_format_list(): return [(format, FILE_FORMATS.get(format, u'')) for", "def cache_cleanup(input_filepath, *args, **kwargs): try: os.remove(create_image_cache_filename(input_filepath, *args, **kwargs)) except OSError:", "import smart_str from common.conf.settings import TEMPORARY_DIRECTORY from common.utils import fs_cleanup", "def create_image_cache_filename(input_filepath, *args, **kwargs): if input_filepath: hash_value = HASH_FUNCTION(u''.join([HASH_FUNCTION(smart_str(input_filepath)), unicode(args),", "pass def create_image_cache_filename(input_filepath, *args, **kwargs): if input_filepath: hash_value = HASH_FUNCTION(u''.join([HASH_FUNCTION(smart_str(input_filepath)),", ".literals import (TRANSFORMATION_CHOICES, TRANSFORMATION_RESIZE, TRANSFORMATION_ROTATE, TRANSFORMATION_ZOOM, DIMENSION_SEPARATOR, FILE_FORMATS) from .runtime", "backend.get_available_transformations(): result.append((transformation, TRANSFORMATION_CHOICES[transformation]['label'])) return result def get_format_list(): return [(format, FILE_FORMATS.get(format,", "DEFAULT_ROTATION) page = kwargs.get('page', DEFAULT_PAGE_NUMBER) transformations = kwargs.get('transformations', []) if", ".literals import (DEFAULT_PAGE_NUMBER, DEFAULT_ZOOM_LEVEL, DEFAULT_ROTATION, DEFAULT_FILE_FORMAT) from .literals import (TRANSFORMATION_CHOICES,", "transformations = [] if output_filepath is None: output_filepath = create_image_cache_filename(input_filepath,", "= [] if output_filepath is None: output_filepath = create_image_cache_filename(input_filepath, *args,", "is None: transformations = [] if output_filepath is None: output_filepath", "output_filepath if office_converter: try: office_converter.convert(input_filepath, mimetype=mimetype) if office_converter.exists: input_filepath =", "= logging.getLogger(__name__) def cache_cleanup(input_filepath, *args, **kwargs): try: os.remove(create_image_cache_filename(input_filepath, *args, **kwargs))", "os.remove(create_image_cache_filename(input_filepath, *args, **kwargs)) except OSError: pass def create_image_cache_filename(input_filepath, *args, **kwargs):", "100: transformations.append( { 'transformation': TRANSFORMATION_ZOOM, 'arguments': {'percent': zoom} } )", "except OfficeConversionError: raise UnknownFileFormat('office converter exception') return backend.get_page_count(input_filepath) def get_available_transformations_choices():", "transformations = kwargs.get('transformations', []) if transformations is None: transformations =", "output_filepath def get_page_count(input_filepath): logger.debug('office_converter: %s' % office_converter) if office_converter: try:", "rotation != 360: transformations.append( { 'transformation': TRANSFORMATION_ROTATE, 'arguments': {'degrees': rotation}", "Recycle the already detected mimetype mimetype = office_converter.mimetype except OfficeConversionError:", "= kwargs.get('rotation', DEFAULT_ROTATION) page = kwargs.get('page', DEFAULT_PAGE_NUMBER) transformations = kwargs.get('transformations',", "common.conf.settings import TEMPORARY_DIRECTORY from common.utils import fs_cleanup from .exceptions import", "kwargs.get('file_format', DEFAULT_FILE_FORMAT) zoom = kwargs.get('zoom', DEFAULT_ZOOM_LEVEL) rotation = kwargs.get('rotation', DEFAULT_ROTATION)", "!= 100: transformations.append( { 'transformation': 
TRANSFORMATION_ZOOM, 'arguments': {'percent': zoom} }", "from common.utils import fs_cleanup from .exceptions import OfficeConversionError, UnknownFileFormat from", "*args, **kwargs): try: os.remove(create_image_cache_filename(input_filepath, *args, **kwargs)) except OSError: pass def", "try: os.remove(create_image_cache_filename(input_filepath, *args, **kwargs)) except OSError: pass def create_image_cache_filename(input_filepath, *args,", "output_filepath = create_image_cache_filename(input_filepath, *args, **kwargs) if os.path.exists(output_filepath): return output_filepath if", "backend.convert_file(input_filepath=input_filepath, output_filepath=output_filepath, transformations=transformations, page=page, file_format=file_format, mimetype=mimetype) finally: if cleanup_files: fs_cleanup(input_filepath)", "try: office_converter.convert(input_filepath) logger.debug('office_converter.exists: %s' % office_converter.exists) if office_converter.exists: input_filepath =", "in backend.get_available_transformations(): result.append((transformation, TRANSFORMATION_CHOICES[transformation]['label'])) return result def get_format_list(): return [(format,", "rotation} } ) try: backend.convert_file(input_filepath=input_filepath, output_filepath=output_filepath, transformations=transformations, page=page, file_format=file_format, mimetype=mimetype)", "u'height'], size.split(DIMENSION_SEPARATOR))) } ) if zoom != 100: transformations.append( {", "logging.getLogger(__name__) def cache_cleanup(input_filepath, *args, **kwargs): try: os.remove(create_image_cache_filename(input_filepath, *args, **kwargs)) except", "cache_cleanup(input_filepath, *args, **kwargs): try: os.remove(create_image_cache_filename(input_filepath, *args, **kwargs)) except OSError: pass", "from .exceptions import OfficeConversionError, UnknownFileFormat from .literals import (DEFAULT_PAGE_NUMBER, DEFAULT_ZOOM_LEVEL,", "360: transformations.append( { 'transformation': TRANSFORMATION_ROTATE, 'arguments': {'degrees': rotation} } )", "already detected mimetype mimetype = office_converter.mimetype except OfficeConversionError: raise UnknownFileFormat('office", "transformations.append( { 'transformation': TRANSFORMATION_RESIZE, 'arguments': dict(zip([u'width', u'height'], size.split(DIMENSION_SEPARATOR))) } )", "backend.get_page_count(input_filepath) def get_available_transformations_choices(): result = [] for transformation in backend.get_available_transformations():", "**kwargs): try: os.remove(create_image_cache_filename(input_filepath, *args, **kwargs)) except OSError: pass def create_image_cache_filename(input_filepath,", "**kwargs)) except OSError: pass def create_image_cache_filename(input_filepath, *args, **kwargs): if input_filepath:", "DIMENSION_SEPARATOR, FILE_FORMATS) from .runtime import backend, office_converter HASH_FUNCTION = lambda", "'application/pdf' else: # Recycle the already detected mimetype mimetype =", "return None def convert(input_filepath, output_filepath=None, cleanup_files=False, mimetype=None, *args, **kwargs): size", "output_filepath=output_filepath, transformations=transformations, page=page, file_format=file_format, mimetype=mimetype) finally: if cleanup_files: fs_cleanup(input_filepath) return", "UnknownFileFormat('office converter exception') return backend.get_page_count(input_filepath) def get_available_transformations_choices(): result = []", "result.append((transformation, TRANSFORMATION_CHOICES[transformation]['label'])) return result def get_format_list(): return [(format, FILE_FORMATS.get(format, 
u''))", "lambda x: hashlib.sha256(x).hexdigest() logger = logging.getLogger(__name__) def cache_cleanup(input_filepath, *args, **kwargs):", "exception') return backend.get_page_count(input_filepath) def get_available_transformations_choices(): result = [] for transformation", "TRANSFORMATION_ROTATE, TRANSFORMATION_ZOOM, DIMENSION_SEPARATOR, FILE_FORMATS) from .runtime import backend, office_converter HASH_FUNCTION", "%s' % office_converter.exists) if office_converter.exists: input_filepath = office_converter.output_filepath except OfficeConversionError:", "transformations=transformations, page=page, file_format=file_format, mimetype=mimetype) finally: if cleanup_files: fs_cleanup(input_filepath) return output_filepath", "zoom = kwargs.get('zoom', DEFAULT_ZOOM_LEVEL) rotation = kwargs.get('rotation', DEFAULT_ROTATION) page =", "size = kwargs.get('size') file_format = kwargs.get('file_format', DEFAULT_FILE_FORMAT) zoom = kwargs.get('zoom',", "fs_cleanup(input_filepath) return output_filepath def get_page_count(input_filepath): logger.debug('office_converter: %s' % office_converter) if", "DEFAULT_ROTATION, DEFAULT_FILE_FORMAT) from .literals import (TRANSFORMATION_CHOICES, TRANSFORMATION_RESIZE, TRANSFORMATION_ROTATE, TRANSFORMATION_ZOOM, DIMENSION_SEPARATOR,", ".exceptions import OfficeConversionError, UnknownFileFormat from .literals import (DEFAULT_PAGE_NUMBER, DEFAULT_ZOOM_LEVEL, DEFAULT_ROTATION,", "except OfficeConversionError: raise UnknownFileFormat('office converter exception') if size: transformations.append( {", "converter exception') return backend.get_page_count(input_filepath) def get_available_transformations_choices(): result = [] for", "input_filepath: hash_value = HASH_FUNCTION(u''.join([HASH_FUNCTION(smart_str(input_filepath)), unicode(args), unicode(kwargs)])) return os.path.join(TEMPORARY_DIRECTORY, hash_value) else:", "return output_filepath if office_converter: try: office_converter.convert(input_filepath, mimetype=mimetype) if office_converter.exists: input_filepath", "= create_image_cache_filename(input_filepath, *args, **kwargs) if os.path.exists(output_filepath): return output_filepath if office_converter:", "the already detected mimetype mimetype = office_converter.mimetype except OfficeConversionError: raise", "absolute_import import hashlib import logging import os from django.utils.encoding import", "if zoom != 100: transformations.append( { 'transformation': TRANSFORMATION_ZOOM, 'arguments': {'percent':", "mimetype=mimetype) finally: if cleanup_files: fs_cleanup(input_filepath) return output_filepath def get_page_count(input_filepath): logger.debug('office_converter:", "get_available_transformations_choices(): result = [] for transformation in backend.get_available_transformations(): result.append((transformation, TRANSFORMATION_CHOICES[transformation]['label']))", "except OSError: pass def create_image_cache_filename(input_filepath, *args, **kwargs): if input_filepath: hash_value", "hash_value) else: return None def convert(input_filepath, output_filepath=None, cleanup_files=False, mimetype=None, *args,", "HASH_FUNCTION(u''.join([HASH_FUNCTION(smart_str(input_filepath)), unicode(args), unicode(kwargs)])) return os.path.join(TEMPORARY_DIRECTORY, hash_value) else: return None def", "return result def get_format_list(): return [(format, FILE_FORMATS.get(format, u'')) for format", "import logging import os from django.utils.encoding import smart_str from common.conf.settings", "office_converter.exists: input_filepath = office_converter.output_filepath mimetype = 
'application/pdf' else: # Recycle", "import fs_cleanup from .exceptions import OfficeConversionError, UnknownFileFormat from .literals import", "return backend.get_page_count(input_filepath) def get_available_transformations_choices(): result = [] for transformation in", "get_page_count(input_filepath): logger.debug('office_converter: %s' % office_converter) if office_converter: try: office_converter.convert(input_filepath) logger.debug('office_converter.exists:", "office_converter: try: office_converter.convert(input_filepath, mimetype=mimetype) if office_converter.exists: input_filepath = office_converter.output_filepath mimetype", "kwargs.get('zoom', DEFAULT_ZOOM_LEVEL) rotation = kwargs.get('rotation', DEFAULT_ROTATION) page = kwargs.get('page', DEFAULT_PAGE_NUMBER)", "TRANSFORMATION_ROTATE, 'arguments': {'degrees': rotation} } ) try: backend.convert_file(input_filepath=input_filepath, output_filepath=output_filepath, transformations=transformations,", "zoom} } ) if rotation != 0 and rotation !=", "os from django.utils.encoding import smart_str from common.conf.settings import TEMPORARY_DIRECTORY from", "*args, **kwargs): size = kwargs.get('size') file_format = kwargs.get('file_format', DEFAULT_FILE_FORMAT) zoom", "exception') if size: transformations.append( { 'transformation': TRANSFORMATION_RESIZE, 'arguments': dict(zip([u'width', u'height'],", "from .runtime import backend, office_converter HASH_FUNCTION = lambda x: hashlib.sha256(x).hexdigest()", "**kwargs): if input_filepath: hash_value = HASH_FUNCTION(u''.join([HASH_FUNCTION(smart_str(input_filepath)), unicode(args), unicode(kwargs)])) return os.path.join(TEMPORARY_DIRECTORY,", "page=page, file_format=file_format, mimetype=mimetype) finally: if cleanup_files: fs_cleanup(input_filepath) return output_filepath def", "DEFAULT_ZOOM_LEVEL) rotation = kwargs.get('rotation', DEFAULT_ROTATION) page = kwargs.get('page', DEFAULT_PAGE_NUMBER) transformations", "import backend, office_converter HASH_FUNCTION = lambda x: hashlib.sha256(x).hexdigest() logger =", "logging import os from django.utils.encoding import smart_str from common.conf.settings import", "unicode(args), unicode(kwargs)])) return os.path.join(TEMPORARY_DIRECTORY, hash_value) else: return None def convert(input_filepath,", "%s' % office_converter) if office_converter: try: office_converter.convert(input_filepath) logger.debug('office_converter.exists: %s' %", "x: hashlib.sha256(x).hexdigest() logger = logging.getLogger(__name__) def cache_cleanup(input_filepath, *args, **kwargs): try:", "= kwargs.get('size') file_format = kwargs.get('file_format', DEFAULT_FILE_FORMAT) zoom = kwargs.get('zoom', DEFAULT_ZOOM_LEVEL)", "OfficeConversionError: raise UnknownFileFormat('office converter exception') if size: transformations.append( { 'transformation':", "unicode(kwargs)])) return os.path.join(TEMPORARY_DIRECTORY, hash_value) else: return None def convert(input_filepath, output_filepath=None,", "rotation = kwargs.get('rotation', DEFAULT_ROTATION) page = kwargs.get('page', DEFAULT_PAGE_NUMBER) transformations =", "__future__ import absolute_import import hashlib import logging import os from", "from django.utils.encoding import smart_str from common.conf.settings import TEMPORARY_DIRECTORY from common.utils", "file_format = kwargs.get('file_format', DEFAULT_FILE_FORMAT) zoom = kwargs.get('zoom', DEFAULT_ZOOM_LEVEL) rotation =", "**kwargs): size = kwargs.get('size') file_format = kwargs.get('file_format', DEFAULT_FILE_FORMAT) zoom =", "DEFAULT_FILE_FORMAT) from .literals import 
(TRANSFORMATION_CHOICES, TRANSFORMATION_RESIZE, TRANSFORMATION_ROTATE, TRANSFORMATION_ZOOM, DIMENSION_SEPARATOR, FILE_FORMATS)", "try: backend.convert_file(input_filepath=input_filepath, output_filepath=output_filepath, transformations=transformations, page=page, file_format=file_format, mimetype=mimetype) finally: if cleanup_files:", ") try: backend.convert_file(input_filepath=input_filepath, output_filepath=output_filepath, transformations=transformations, page=page, file_format=file_format, mimetype=mimetype) finally: if", "= office_converter.output_filepath except OfficeConversionError: raise UnknownFileFormat('office converter exception') return backend.get_page_count(input_filepath)", "(TRANSFORMATION_CHOICES, TRANSFORMATION_RESIZE, TRANSFORMATION_ROTATE, TRANSFORMATION_ZOOM, DIMENSION_SEPARATOR, FILE_FORMATS) from .runtime import backend,", "kwargs.get('transformations', []) if transformations is None: transformations = [] if", "if office_converter.exists: input_filepath = office_converter.output_filepath except OfficeConversionError: raise UnknownFileFormat('office converter", "DEFAULT_ZOOM_LEVEL, DEFAULT_ROTATION, DEFAULT_FILE_FORMAT) from .literals import (TRANSFORMATION_CHOICES, TRANSFORMATION_RESIZE, TRANSFORMATION_ROTATE, TRANSFORMATION_ZOOM,", "TRANSFORMATION_ZOOM, 'arguments': {'percent': zoom} } ) if rotation != 0", "HASH_FUNCTION = lambda x: hashlib.sha256(x).hexdigest() logger = logging.getLogger(__name__) def cache_cleanup(input_filepath,", "import (TRANSFORMATION_CHOICES, TRANSFORMATION_RESIZE, TRANSFORMATION_ROTATE, TRANSFORMATION_ZOOM, DIMENSION_SEPARATOR, FILE_FORMATS) from .runtime import", "detected mimetype mimetype = office_converter.mimetype except OfficeConversionError: raise UnknownFileFormat('office converter", "'arguments': {'degrees': rotation} } ) try: backend.convert_file(input_filepath=input_filepath, output_filepath=output_filepath, transformations=transformations, page=page,", "logger.debug('office_converter.exists: %s' % office_converter.exists) if office_converter.exists: input_filepath = office_converter.output_filepath except", "0 and rotation != 360: transformations.append( { 'transformation': TRANSFORMATION_ROTATE, 'arguments':", "FILE_FORMATS) from .runtime import backend, office_converter HASH_FUNCTION = lambda x:", "output_filepath=None, cleanup_files=False, mimetype=None, *args, **kwargs): size = kwargs.get('size') file_format =", "def get_available_transformations_choices(): result = [] for transformation in backend.get_available_transformations(): result.append((transformation,", "UnknownFileFormat('office converter exception') if size: transformations.append( { 'transformation': TRANSFORMATION_RESIZE, 'arguments':", "office_converter.convert(input_filepath, mimetype=mimetype) if office_converter.exists: input_filepath = office_converter.output_filepath mimetype = 'application/pdf'", "if office_converter: try: office_converter.convert(input_filepath) logger.debug('office_converter.exists: %s' % office_converter.exists) if office_converter.exists:", "if os.path.exists(output_filepath): return output_filepath if office_converter: try: office_converter.convert(input_filepath, mimetype=mimetype) if", "cleanup_files=False, mimetype=None, *args, **kwargs): size = kwargs.get('size') file_format = kwargs.get('file_format',", "= kwargs.get('file_format', DEFAULT_FILE_FORMAT) zoom = kwargs.get('zoom', DEFAULT_ZOOM_LEVEL) rotation = kwargs.get('rotation',", "import absolute_import import hashlib import logging import os from django.utils.encoding", 
"mimetype mimetype = office_converter.mimetype except OfficeConversionError: raise UnknownFileFormat('office converter exception')", "{'percent': zoom} } ) if rotation != 0 and rotation", "'transformation': TRANSFORMATION_ZOOM, 'arguments': {'percent': zoom} } ) if rotation !=", "transformations.append( { 'transformation': TRANSFORMATION_ZOOM, 'arguments': {'percent': zoom} } ) if", "import os from django.utils.encoding import smart_str from common.conf.settings import TEMPORARY_DIRECTORY", "import (DEFAULT_PAGE_NUMBER, DEFAULT_ZOOM_LEVEL, DEFAULT_ROTATION, DEFAULT_FILE_FORMAT) from .literals import (TRANSFORMATION_CHOICES, TRANSFORMATION_RESIZE,", "transformations.append( { 'transformation': TRANSFORMATION_ROTATE, 'arguments': {'degrees': rotation} } ) try:", "else: return None def convert(input_filepath, output_filepath=None, cleanup_files=False, mimetype=None, *args, **kwargs):", "import hashlib import logging import os from django.utils.encoding import smart_str", "def get_format_list(): return [(format, FILE_FORMATS.get(format, u'')) for format in backend.get_format_list()]", "kwargs.get('size') file_format = kwargs.get('file_format', DEFAULT_FILE_FORMAT) zoom = kwargs.get('zoom', DEFAULT_ZOOM_LEVEL) rotation", "office_converter) if office_converter: try: office_converter.convert(input_filepath) logger.debug('office_converter.exists: %s' % office_converter.exists) if", "is None: output_filepath = create_image_cache_filename(input_filepath, *args, **kwargs) if os.path.exists(output_filepath): return", "office_converter.convert(input_filepath) logger.debug('office_converter.exists: %s' % office_converter.exists) if office_converter.exists: input_filepath = office_converter.output_filepath", "!= 0 and rotation != 360: transformations.append( { 'transformation': TRANSFORMATION_ROTATE,", "} ) try: backend.convert_file(input_filepath=input_filepath, output_filepath=output_filepath, transformations=transformations, page=page, file_format=file_format, mimetype=mimetype) finally:", "create_image_cache_filename(input_filepath, *args, **kwargs) if os.path.exists(output_filepath): return output_filepath if office_converter: try:", "from common.conf.settings import TEMPORARY_DIRECTORY from common.utils import fs_cleanup from .exceptions", "page = kwargs.get('page', DEFAULT_PAGE_NUMBER) transformations = kwargs.get('transformations', []) if transformations", "finally: if cleanup_files: fs_cleanup(input_filepath) return output_filepath def get_page_count(input_filepath): logger.debug('office_converter: %s'", "office_converter HASH_FUNCTION = lambda x: hashlib.sha256(x).hexdigest() logger = logging.getLogger(__name__) def", "{ 'transformation': TRANSFORMATION_ROTATE, 'arguments': {'degrees': rotation} } ) try: backend.convert_file(input_filepath=input_filepath,", "office_converter.exists) if office_converter.exists: input_filepath = office_converter.output_filepath except OfficeConversionError: raise UnknownFileFormat('office", "office_converter: try: office_converter.convert(input_filepath) logger.debug('office_converter.exists: %s' % office_converter.exists) if office_converter.exists: input_filepath", "TRANSFORMATION_ZOOM, DIMENSION_SEPARATOR, FILE_FORMATS) from .runtime import backend, office_converter HASH_FUNCTION =", "OfficeConversionError, UnknownFileFormat from .literals import (DEFAULT_PAGE_NUMBER, DEFAULT_ZOOM_LEVEL, DEFAULT_ROTATION, DEFAULT_FILE_FORMAT) from", ") if rotation != 0 and rotation != 360: transformations.append(", "logger.debug('office_converter: %s' % office_converter) if 
office_converter: try: office_converter.convert(input_filepath) logger.debug('office_converter.exists: %s'", "dict(zip([u'width', u'height'], size.split(DIMENSION_SEPARATOR))) } ) if zoom != 100: transformations.append(", "size.split(DIMENSION_SEPARATOR))) } ) if zoom != 100: transformations.append( { 'transformation':", "logger = logging.getLogger(__name__) def cache_cleanup(input_filepath, *args, **kwargs): try: os.remove(create_image_cache_filename(input_filepath, *args,", "zoom != 100: transformations.append( { 'transformation': TRANSFORMATION_ZOOM, 'arguments': {'percent': zoom}", "transformation in backend.get_available_transformations(): result.append((transformation, TRANSFORMATION_CHOICES[transformation]['label'])) return result def get_format_list(): return", "DEFAULT_FILE_FORMAT) zoom = kwargs.get('zoom', DEFAULT_ZOOM_LEVEL) rotation = kwargs.get('rotation', DEFAULT_ROTATION) page", "None def convert(input_filepath, output_filepath=None, cleanup_files=False, mimetype=None, *args, **kwargs): size =", ".runtime import backend, office_converter HASH_FUNCTION = lambda x: hashlib.sha256(x).hexdigest() logger", "hash_value = HASH_FUNCTION(u''.join([HASH_FUNCTION(smart_str(input_filepath)), unicode(args), unicode(kwargs)])) return os.path.join(TEMPORARY_DIRECTORY, hash_value) else: return", "import TEMPORARY_DIRECTORY from common.utils import fs_cleanup from .exceptions import OfficeConversionError,", "= kwargs.get('zoom', DEFAULT_ZOOM_LEVEL) rotation = kwargs.get('rotation', DEFAULT_ROTATION) page = kwargs.get('page',", "'arguments': dict(zip([u'width', u'height'], size.split(DIMENSION_SEPARATOR))) } ) if zoom != 100:", "% office_converter) if office_converter: try: office_converter.convert(input_filepath) logger.debug('office_converter.exists: %s' % office_converter.exists)", "result = [] for transformation in backend.get_available_transformations(): result.append((transformation, TRANSFORMATION_CHOICES[transformation]['label'])) return", "= HASH_FUNCTION(u''.join([HASH_FUNCTION(smart_str(input_filepath)), unicode(args), unicode(kwargs)])) return os.path.join(TEMPORARY_DIRECTORY, hash_value) else: return None", "try: office_converter.convert(input_filepath, mimetype=mimetype) if office_converter.exists: input_filepath = office_converter.output_filepath mimetype =", "if cleanup_files: fs_cleanup(input_filepath) return output_filepath def get_page_count(input_filepath): logger.debug('office_converter: %s' %", "*args, **kwargs): if input_filepath: hash_value = HASH_FUNCTION(u''.join([HASH_FUNCTION(smart_str(input_filepath)), unicode(args), unicode(kwargs)])) return", "if transformations is None: transformations = [] if output_filepath is", "raise UnknownFileFormat('office converter exception') return backend.get_page_count(input_filepath) def get_available_transformations_choices(): result =", "= [] for transformation in backend.get_available_transformations(): result.append((transformation, TRANSFORMATION_CHOICES[transformation]['label'])) return result", "# Recycle the already detected mimetype mimetype = office_converter.mimetype except", "= office_converter.mimetype except OfficeConversionError: raise UnknownFileFormat('office converter exception') if size:", "backend, office_converter HASH_FUNCTION = lambda x: hashlib.sha256(x).hexdigest() logger = logging.getLogger(__name__)", "[] if output_filepath is None: output_filepath = create_image_cache_filename(input_filepath, *args, **kwargs)", "TEMPORARY_DIRECTORY from common.utils import fs_cleanup from .exceptions import 
OfficeConversionError, UnknownFileFormat", "from .literals import (DEFAULT_PAGE_NUMBER, DEFAULT_ZOOM_LEVEL, DEFAULT_ROTATION, DEFAULT_FILE_FORMAT) from .literals import", "os.path.join(TEMPORARY_DIRECTORY, hash_value) else: return None def convert(input_filepath, output_filepath=None, cleanup_files=False, mimetype=None,", "mimetype=mimetype) if office_converter.exists: input_filepath = office_converter.output_filepath mimetype = 'application/pdf' else:", "file_format=file_format, mimetype=mimetype) finally: if cleanup_files: fs_cleanup(input_filepath) return output_filepath def get_page_count(input_filepath):", ") if zoom != 100: transformations.append( { 'transformation': TRANSFORMATION_ZOOM, 'arguments':", "for transformation in backend.get_available_transformations(): result.append((transformation, TRANSFORMATION_CHOICES[transformation]['label'])) return result def get_format_list():", "[] for transformation in backend.get_available_transformations(): result.append((transformation, TRANSFORMATION_CHOICES[transformation]['label'])) return result def", "kwargs.get('page', DEFAULT_PAGE_NUMBER) transformations = kwargs.get('transformations', []) if transformations is None:", "converter exception') if size: transformations.append( { 'transformation': TRANSFORMATION_RESIZE, 'arguments': dict(zip([u'width',", "rotation != 0 and rotation != 360: transformations.append( { 'transformation':", "if office_converter.exists: input_filepath = office_converter.output_filepath mimetype = 'application/pdf' else: #", "{ 'transformation': TRANSFORMATION_ZOOM, 'arguments': {'percent': zoom} } ) if rotation", "= office_converter.output_filepath mimetype = 'application/pdf' else: # Recycle the already", "} ) if zoom != 100: transformations.append( { 'transformation': TRANSFORMATION_ZOOM,", "(DEFAULT_PAGE_NUMBER, DEFAULT_ZOOM_LEVEL, DEFAULT_ROTATION, DEFAULT_FILE_FORMAT) from .literals import (TRANSFORMATION_CHOICES, TRANSFORMATION_RESIZE, TRANSFORMATION_ROTATE,", "else: # Recycle the already detected mimetype mimetype = office_converter.mimetype", "**kwargs) if os.path.exists(output_filepath): return output_filepath if office_converter: try: office_converter.convert(input_filepath, mimetype=mimetype)", "office_converter.output_filepath except OfficeConversionError: raise UnknownFileFormat('office converter exception') return backend.get_page_count(input_filepath) def", "office_converter.output_filepath mimetype = 'application/pdf' else: # Recycle the already detected", "TRANSFORMATION_RESIZE, TRANSFORMATION_ROTATE, TRANSFORMATION_ZOOM, DIMENSION_SEPARATOR, FILE_FORMATS) from .runtime import backend, office_converter", "output_filepath is None: output_filepath = create_image_cache_filename(input_filepath, *args, **kwargs) if os.path.exists(output_filepath):" ]
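The conversion cache above keys its output files on a hash of the source path plus every transformation argument, so a repeated call with identical parameters returns the already-converted file instead of re-converting. A minimal standalone sketch of that cache-key scheme (Python 3; the cache_key helper name and the use of the system temp directory instead of TEMPORARY_DIRECTORY are illustrative assumptions, not part of the module above):

import hashlib
import os
import tempfile


def cache_key(input_filepath, *args, **kwargs):
    # Hash the path first, then mix in the stringified positional and keyword
    # arguments, mirroring the HASH_FUNCTION(HASH_FUNCTION(path) + args + kwargs) scheme.
    path_digest = hashlib.sha256(input_filepath.encode('utf-8')).hexdigest()
    combined = ''.join([path_digest, str(args), str(kwargs)])
    return os.path.join(tempfile.gettempdir(), hashlib.sha256(combined.encode('utf-8')).hexdigest())


# Identical parameters map to the same cache file; changing any argument
# (zoom, rotation, size, ...) produces a new cache entry.
print(cache_key('/tmp/report.pdf', zoom=100, rotation=0))
print(cache_key('/tmp/report.pdf', zoom=150, rotation=0))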
# ValYouW/DeepLearningCourse - logistic regression on the exam admission dataset
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import utils


def plot_data(x_mat, y, db_x, db_y):
    plt.figure()
    plt.title('Data')
    admitted = (y == 1).flatten()
    rejected = (y == 0).flatten()

    # plot decision boundary
    plt.plot(db_x, db_y)

    # plot admitted
    plt.scatter(x_mat[admitted, 0], x_mat[admitted, 1], color='blue', marker='+')

    # plot rejected
    plt.scatter(x_mat[rejected, 0], x_mat[rejected, 1], edgecolors='red', facecolors='none', marker='o')

    plt.xlabel('exam 1 score')
    plt.ylabel('exam 2 score')
    plt.legend(['boundary', 'admitted', 'rejected'])


def main():
    print('Loading dataset...')

    # data is: exam 1 score, exam 2 score, bool whether admitted
    frame = pd.read_csv('ex2data1.csv', header=None)
    data = frame.values
    x_mat = data[:, 0:2]  # exam scores
    y = data[:, 2:3]  # admitted or not

    # normalize input (input has large values which causes sigmoid to always be 1 or 0)
    x_mean = np.mean(x_mat, axis=0)
    x_std = np.std(x_mat, axis=0)
    x_norm = (x_mat - x_mean) / x_std

    # add intercept
    x_norm = np.insert(x_norm, 0, 1, axis=1)

    # Learn model
    print('starting to learn...')
    (loss, reg_loss, theta) = utils.learn(x_norm, y, 5000, 0.1)
    print('Final loss %s' % loss[-1])
    print('Final theta \n%s' % theta)

    # predict for student
    joe = np.array([[45, 85]])
    joe_norm = (joe - x_mean) / x_std
    joe_norm = np.insert(joe_norm, 0, 1, axis=1)
    p = utils.sigmoid(joe_norm.dot(theta))
    print('Student with grades %s and %s has admission probability: %s' % (45, 85, p[0, 0]))

    # Predict on train set
    prediction = (utils.sigmoid(x_norm.dot(theta)) >= 0.5)
    actual = (y == 1)
    predict_success = np.sum(prediction == actual)
    print('Model evaluation on training set has success of %s/%s' % (predict_success, y.shape[0]))

    # calc decision boundary
    # The decision boundary is the threshold line that separates true/false predictions,
    # this means that on this line the prediction is exactly 0.5, meaning:
    # p = sigmoid(x_mat.dot(theta)) = 0.5 ====> x_mat.dot(theta) = 0
    # so our line equation is: theta0 + theta1*x1 + theta2*x2 = 0
    # x2 = -theta0 / theta2 - (theta1/theta2)*x1
    theta = theta.flatten()

    # calc 2 points on the line
    plot_x = np.array([np.min(x_norm[:, 1]), np.max(x_norm[:, 1])])
    plot_y = -1 * (theta[0] / theta[2]) - (theta[1] / theta[2]) * plot_x

    # denormalize the points
    plot_x = plot_x * x_std[0] + x_mean[0]
    plot_y = plot_y * x_std[1] + x_mean[1]

    plot_data(x_mat, y, plot_x, plot_y)
    utils.plot_loss(loss)
    plt.show()


if __name__ == '__main__':
    main()
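The script above imports a local utils module that is not part of this listing; only its call signatures are visible (utils.sigmoid, utils.learn(x, y, iterations, learning_rate) returning (loss, reg_loss, theta), and utils.plot_loss(loss)). A minimal sketch of what such a module could look like, assuming plain batch gradient descent on the cross-entropy loss and no regularization (so the reg_loss history simply mirrors loss here):

import numpy as np
import matplotlib.pyplot as plt


def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))


def learn(x, y, iterations, learning_rate):
    # x: (m, n) design matrix with intercept column, y: (m, 1) labels in {0, 1}
    m, n = x.shape
    theta = np.zeros((n, 1))
    loss_history = []
    reg_loss_history = []
    eps = 1e-12  # guards against log(0)
    for _ in range(iterations):
        h = sigmoid(x.dot(theta))
        loss = -np.mean(y * np.log(h + eps) + (1 - y) * np.log(1 - h + eps))
        loss_history.append(loss)
        reg_loss_history.append(loss)  # no regularization in this sketch
        gradient = x.T.dot(h - y) / m
        theta -= learning_rate * gradient
    return loss_history, reg_loss_history, theta


def plot_loss(loss):
    plt.figure()
    plt.title('Training loss')
    plt.plot(loss)
    plt.xlabel('iteration')
    plt.ylabel('loss')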
PyTorch Ignite time-profiling handlers (BasicTimeProfiler and the related HandlersTimeProfiler in ignite.handlers): attached to an Engine, they profile the handlers, events, data loading and data processing times, aggregate per-event statistics (total, min/index, max/index, mean, std) for print_results(), and dump the unaggregated per-iteration measurements to a CSV file with write_results(output_path). Usage, from the class docstring:

    from ignite.handlers import BasicTimeProfiler

    trainer = Engine(train_updater)

    # Create an object of the profiler and attach an engine to it
    profiler = BasicTimeProfiler()
    profiler.attach(trainer)

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_intermediate_results():
        profiler.print_results(profiler.get_results())

    trainer.run(dataloader, max_epochs=3)

    profiler.write_results('path_to_dir/time_profiling.csv')

print_results(profiler.get_results()) prints the aggregated summary table, with one row per event handler plus the processing and dataflow totals.
revert back to original handler after profiling setattr(_timeit_handler, \"_profiler_original\",", "yet triggered\") ] # type: List[Tuple[str, Union[str, float, Tuple[Union[float], Union[float]]]]]", "-> Dict[str, Dict[str, Any]]: \"\"\" Method to fetch the aggregated", "\"Total(s)\", \"Min(s)/IDX\", \"Max(s)/IDX\", \"Mean(s)\", \"Std(s)\", ] # Have to use", "-> None: self._event_handlers_timer.reset() handler(*args, **kwargs) t = self._event_handlers_timer.value() hname =", "[ (\"processing_stats\", self._compute_basic_stats(self.processing_times)), (\"dataflow_stats\", self._compute_basic_stats(self.dataflow_times)), (\"event_handlers_stats\", event_handlers_stats), ( \"event_handlers_names\", {str(e.name).replace(\".\",", "Processing function: 157.46292 | 0.01452/1501 | 0.26905/0 | 0.07730 |", "] ) def write_results(self, output_path: str) -> None: \"\"\" Method", "_timeit_handler(*args: Any, **kwargs: Any) -> None: self._event_handlers_timer.reset() handler(*args, **kwargs) t", "dtype=torch.float32) + 1 cols.insert(0, count_col) headers.insert(0, \"#\") # pad all", "for x in cols] results_dump = torch.stack(cols, dim=1).numpy() results_df =", "t = self._event_handlers_timer.value() e = engine.state.epoch - 1 self.event_handlers_times[Events.EPOCH_COMPLETED][e] =", "_as_last_iter_started(self, engine: Engine) -> None: t = self._event_handlers_timer.value() i =", "SPACING_SIZE) line_length_lst[0] += padding + SPACING_SIZE add_column(handler_column_width, text_dir=\"<\") add_column(event_column_width, text_dir=\"<\")", "def _reset(self, num_epochs: int, total_num_iters: int) -> None: self.dataflow_times =", "results[-2:]: row[3] = \"{}s/{}\".format(*row[3]) # type: ignore[misc] row[4] = \"{}s/{}\".format(*row[4])", "def print_results(results: Dict) -> str: \"\"\" Method to print the", "{total_time:.5f} - Events.STARTED: {STARTED_names} {STARTED} - Events.EPOCH_STARTED: {EPOCH_STARTED_names} {EPOCH_STARTED} -", "TypeError(f\"Argument engine should be ignite.engine.Engine, but given {type(engine)}\") if not", "min/idx and max/idx row[3] = \"{}/{}\".format(*row[3]) # type: ignore[misc] row[4]", "and data processing times. Custom events are also profiled by", "checks whether the handler is internal return any(n in repr(handler)", "is run .. 
code-block:: python results = profiler.get_results() \"\"\" total_eh_time", ") -> List[Union[str, float, Tuple[Union[str, float], Union[str, float]]]]: data =", "handler) return _timeit_handler def _timeit_processing(self) -> None: # handler used", "] self._lmethods = [ self._as_last_epoch_started, self._as_last_epoch_completed, self._as_last_iter_started, self._as_last_iter_completed, self._as_last_get_batch_started, self._as_last_get_batch_completed,", "name of the callable handler return getattr(handler, \"__qualname__\", handler.__class__.__name__) def", "- Events.STARTED: {STARTED_names} {STARTED} - Events.EPOCH_STARTED: {EPOCH_STARTED_names} {EPOCH_STARTED} - Events.ITERATION_STARTED:", "0.00533s/1874, max/index: 0.01129s/937, mean: 0.00866s, std: 0.00113s] \"\"\" # adopted", "e in engine._allowed_events: for i, (func, args, kwargs) in enumerate(engine._event_handlers[e]):", "not self._is_internal_handler(h) ] for e in engine._allowed_events } self._reset(self.event_handlers_names) for", "torch.std(data).item()), ] return OrderedDict(out) def get_results(self) -> Dict[str, Dict[str, Any]]:", "None: t = self._event_handlers_timer.value() e = engine.state.epoch - 1 self.event_handlers_times[Events.EPOCH_STARTED][e]", "22.04571 ----------------------------------------- ----------------------- -------------- Processing took total 11.29543s [min/index: 0.00393s/1875,", "0.00784s/0, mean: 0.00602s, std: 0.00034s] Dataflow took total 16.24365s [min/index:", "self.events_to_ignore } # Setup all other handlers: engine._event_handlers[Events.STARTED].append((self._as_last_started, (engine,), {}))", "Engine) -> None: self.event_handlers_times[Events.COMPLETED][0] = self._event_handlers_timer.value() # Remove added handlers:", "results[:-3]: # format min/idx and max/idx row[3] = \"{}/{}\".format(*row[3]) #", "def _as_last_get_batch_started(self, engine: Engine) -> None: t = self._event_handlers_timer.value() i", "[processing_stats, dataflow_stats] headers = [\"processing_stats\", \"dataflow_stats\"] for e in self.event_handlers_times:", "2.55271 run.<locals>.log_time EPOCH_COMPLETED 0.00049 run.<locals>.log_intermediate_results EPOCH_COMPLETED 0.00106 run.<locals>.log_training_loss ITERATION_COMPLETED 0.059", "in engine._event_handlers[e] if \"BasicTimeProfiler.\" not in repr(h) # avoid adding", "_as_last_get_batch_completed(self, engine: Engine) -> None: t = self._event_handlers_timer.value() i =", "\"#\") # pad all tensors to have same length cols", "handlers: {total_time:.5f} - Events.STARTED: {STARTED_names} {STARTED} - Events.EPOCH_STARTED: {EPOCH_STARTED_names} {EPOCH_STARTED}", "t def _as_first_completed(self, engine: Engine) -> None: self._event_handlers_timer.reset() def _as_last_completed(self,", "profiling results to a csv file Args: output_path: file output", "compute_basic_stats( times: Union[Sequence, torch.Tensor] ) -> List[Union[str, float, Tuple[Union[str, float],", "from ignite.handlers import BasicTimeProfiler trainer = Engine(train_updater) # Create an", "zip(self._events, self._lmethods): engine._event_handlers[e].append((m, (engine,), {})) # Let's go self._event_handlers_timer.reset() def", "Events.GET_BATCH_STARTED, Events.GET_BATCH_COMPLETED, Events.COMPLETED, ] self._fmethods = [ self._as_first_epoch_started, self._as_first_epoch_completed, self._as_first_iter_started,", "typing import Any, Callable, Dict, List, Mapping, Sequence, Tuple, Union,", "profiler.get_results() \"\"\" total_eh_time = sum( [ sum(self.event_handlers_times[e][h]) for e in", "Events.COMPLETED: 
torch.zeros(1), Events.EPOCH_STARTED: torch.zeros(num_epochs), Events.EPOCH_COMPLETED: torch.zeros(num_epochs), Events.ITERATION_STARTED: torch.zeros(total_num_iters), Events.ITERATION_COMPLETED: torch.zeros(total_num_iters),", "\"\", total_eh_time, \"\", \"\", \"\", \"\"]) event_handler_stats.append([\"Processing\", \"None\", *compute_basic_stats(self.processing_times)]) event_handler_stats.append([\"Dataflow\",", "RuntimeError(\"Need pandas to write results as files\") processing_stats = torch.tensor(self.processing_times,", "Dict[str, Dict[str, Any]]: \"\"\" Method to fetch the aggregated profiler", "to store the unaggregated profiling results to a csv file", "0.00003 0.252387 0.125676 2 0.00029 0.252342 0.125123 \"\"\" try: import", "t self._processing_timer.reset() def _as_first_iter_completed(self, engine: Engine) -> None: t =", "versionadded:: 0.4.6 \"\"\" EVENT_FILTER_THESHOLD_TIME = 0.0001 def __init__(self) -> None:", "if the handler was attached to event with event filter", "e: [ h.__qualname__ if hasattr(h, \"__qualname__\") else h.__class__.__name__ for (h,", "None: row_format_lst[0] += \"{: \" + text_dir + str(padding) +", "= pd.DataFrame( data=results_dump, columns=[ \"epoch\", \"iteration\", \"processing_stats\", \"dataflow_stats\", \"Event_STARTED\", \"Event_COMPLETED\",", "dataflow_stats = self.dataflow_times event_started = self.event_handlers_times[Events.STARTED].repeat_interleave(self.total_num_iters) event_completed = self.event_handlers_times[Events.COMPLETED].repeat_interleave(self.total_num_iters) event_epoch_started", "1.0 1.0 0.00003 0.252387 0.125676 1.0 2.0 0.00029 0.252342 0.125123", "type: ignore[misc] append(row_format.format(*row)) append(header_sep) # print total handlers time row", "> 1: std = round(torch.std(data).item(), 5) return [total, min_index, max_index,", "\"Event_STARTED\", \"Event_COMPLETED\", \"Event_EPOCH_STARTED\", \"Event_EPOCH_COMPLETED\", \"Event_ITERATION_STARTED\", \"Event_ITERATION_COMPLETED\", \"Event_GET_BATCH_STARTED\", \"Event_GET_BATCH_COMPLETED\", ], )", "= torch.arange(self.total_num_iters, dtype=torch.float32) + 1 processing_stats = self.processing_times dataflow_stats =", "engine.has_event_handler(self._as_first_started): engine._event_handlers[Events.STARTED].insert(0, (self._as_first_started, (engine,), {})) @staticmethod def _compute_basic_stats(data: torch.Tensor) ->", "e) for e, m in zip(self._events, self._lmethods): engine.remove_event_handler(m, e) def", "for h in self.event_handlers_times[e] ] ) total_eh_time = round(float(total_eh_time), 5)", "repr(handler) for n in [\"HandlersTimeProfiler.\", \"Timer.\"]) def _detach_profiler_handlers(self, engine: Engine)", "type: List[Tuple[str, Union[str, float, Tuple[Union[float], Union[float]]]]] if len(data) > 1:", "h in self.event_handlers_times[e] ] ) total_eh_time = round(float(total_eh_time), 5) def", "should be ignite.engine.Engine, but given {type(engine)}\") if not engine.has_event_handler(self._as_first_started): engine._event_handlers[Events.STARTED].insert(0,", "= round(torch.mean(data).item(), 5) if len(data) > 1: std = round(torch.std(data).item(),", "trainer.run(..., epoch_length=epoch_length) in order to fix this.\" ) num_iters_per_epoch =", "_, _) in engine._event_handlers[e] if \"BasicTimeProfiler.\" not in repr(h) #", "d = self._dataflow_timer.value() self.dataflow_times[i] = d self._dataflow_timer.reset() def _as_first_iter_started(self, engine:", "Union[str, float, Tuple[Union[float], Union[float]]]]] if len(data) > 1: out +=", "Any]]: \"\"\" Method to 
fetch the aggregated profiler results after", "dtype=torch.float32)) # Determine maximum length max_len = max([x.numel() for x", "for item in results]) + 4 # type: ignore[arg-type] event_column_width", "it profiler = BasicTimeProfiler() profiler.attach(trainer) @trainer.on(Events.EPOCH_COMPLETED) def log_intermediate_results(): profiler.print_results(profiler.get_results()) trainer.run(dataloader,", ".. code-block:: python profiler.write_results('path_to_dir/awesome_filename.csv') Examples: .. code-block:: text ----------------------------------------------------------------- #", "t def _as_first_get_batch_started(self, engine: Engine) -> None: self._event_handlers_timer.reset() self._dataflow_timer.reset() def", "event_completed, event_epoch_started, event_epoch_completed, event_iter_started, event_iter_completed, event_batch_started, event_batch_completed, ], dim=1, ).numpy()", "torch.stack(cols, dim=1).numpy() results_df = pd.DataFrame(data=results_dump, columns=headers) results_df.to_csv(output_path, index=False) @staticmethod def", "Engine) -> None: self._event_handlers_timer.reset() def _as_last_get_batch_completed(self, engine: Engine) -> None:", "None: # wraps original handlers for profiling self.event_handlers_names = {", "self._event_handlers_timer.value() i = engine.state.iteration - 1 self.event_handlers_times[Events.GET_BATCH_COMPLETED][i] = t d", "t = self._processing_timer.value() self.processing_times.append(t) def _timeit_dataflow(self) -> None: # handler", "handler: Callable, event: EventEnum) -> Callable: @functools.wraps(handler) def _timeit_handler(*args: Any,", "= engine.state.iteration - 1 self.event_handlers_times[Events.ITERATION_COMPLETED][i] = t def _as_first_epoch_completed(self, engine:", "self._event_handlers_timer.value() i = engine.state.iteration - 1 self.event_handlers_times[Events.ITERATION_COMPLETED][i] = t def", "1 iterations = torch.arange(self.total_num_iters, dtype=torch.float32) + 1 processing_stats = self.processing_times", "header_sep = header_sep_lst[0] result = [] def append(s: str) ->", "# revert back the wrapped handlers with original handlers at", "e, m in zip(self._events, self._lmethods): engine.remove_event_handler(m, e) def attach(self, engine:", "zip(self._events, self._fmethods): engine.remove_event_handler(m, e) for e, m in zip(self._events, self._lmethods):", "| 0.00001/188 | 0.00018/679 | 0.00002 | 0.00001 - Events.ITERATION_COMPLETED:", "], ) results_df.to_csv(output_path, index=False) @staticmethod def print_results(results: Dict) -> str:", "= [ (\"total\", torch.sum(data).item() if len(data) > 0 else \"not", "Mapping, Sequence, Tuple, Union, cast import torch from ignite.engine import", "{}s, std: {}s]\" for row in results[-2:]: row[3] = \"{}s/{}\".format(*row[3])", "python profiler.print_results(results) Examples: .. code-block:: text ---------------------------------------------------- | Time profiling", "[ h, getattr(e, \"name\", str(e)), *compute_basic_stats(torch.tensor(self.event_handlers_times[e][h], dtype=torch.float32)), ] for e", "in self.event_handlers_times for h in self.event_handlers_times[e] ] event_handler_stats.append([\"Total\", \"\", total_eh_time,", "_timeit_handler def _timeit_processing(self) -> None: # handler used for profiling", "{processing_stats} Dataflow: {dataflow_stats} Event handlers: {total_time:.5f} - Events.STARTED: {STARTED_names} {STARTED}", "type: Dict[EventEnum, Dict[str, List[float]]] @staticmethod def _get_callable_name(handler: Callable) -> str:", "Py3 only... 
SPACING_SIZE = 2 row_format_lst = [\"\"] header_sep_lst =", "None: \"\"\" Method to store the unaggregated profiling results to", "\"None\") # type: Tuple[Union[str, float], Union[str, float]] mean = \"None\"", "= t def _as_first_epoch_completed(self, engine: Engine) -> None: self._event_handlers_timer.reset() def", "] def _reset(self, num_epochs: int, total_num_iters: int) -> None: self.dataflow_times", "d.values()]) return out others = { k: odict_to_str(v) if isinstance(v,", "_) in engine._event_handlers[e] if \"BasicTimeProfiler.\" not in repr(h) # avoid", "m in zip(self._events, self._lmethods): engine._event_handlers[e].append((m, (engine,), {})) # Let's go", "dtype=torch.float32) + 1 processing_stats = self.processing_times dataflow_stats = self.dataflow_times event_started", "List[List[Union[str, float]]]) -> None: \"\"\" Method to print the aggregated", "self._as_last_iter_completed, self._as_last_get_batch_started, self._as_last_get_batch_completed, self._as_last_completed, ] def _reset(self, num_epochs: int, total_num_iters:", "| 0.01258 Dataflow: 6.11384 | 0.00008/1935 | 0.28461/1551 | 0.00300", "kwargs) in enumerate(engine._event_handlers[e]): if hasattr(func, \"_profiler_original\"): engine._event_handlers[e][i] = (func._profiler_original, args,", "and attach an engine to it profiler = BasicTimeProfiler() profiler.attach(trainer)", "padding + (\" \" * SPACING_SIZE) line_length_lst[0] += padding +", "is internal return any(n in repr(handler) for n in [\"HandlersTimeProfiler.\",", "be used to profile the handlers, data loading and data", "0.00089/1943 | 0.00010 | 0.00003 - Events.EPOCH_COMPLETED: ['empty_cuda_cache', 'training.<locals>.log_elapsed_time', ]", "List[List[Union[str, float]]]: \"\"\" Method to fetch the aggregated profiler results", "= self._dataflow_timer.value() self.dataflow_times[i] = d self._dataflow_timer.reset() def _as_first_iter_started(self, engine: Engine)", "event_handler_stats.append([\"Total\", \"\", total_eh_time, \"\", \"\", \"\", \"\"]) event_handler_stats.append([\"Processing\", \"None\", *compute_basic_stats(self.processing_times)])", "for n in [\"HandlersTimeProfiler.\", \"Timer.\"]) def _detach_profiler_handlers(self, engine: Engine) ->", "-> None: \"\"\"Attach BasicTimeProfiler to the given engine. 
Args: engine:", "dtype=torch.float32) cols = [processing_stats, dataflow_stats] headers = [\"processing_stats\", \"dataflow_stats\"] for", "= [] # type: List[float] self.event_handlers_times = {} # type:", "for x in cols]) count_col = torch.arange(max_len, dtype=torch.float32) + 1", "self._as_first_get_batch_started, self._as_first_get_batch_completed, self._as_first_completed, ] self._lmethods = [ self._as_last_epoch_started, self._as_last_epoch_completed, self._as_last_iter_started,", "to attach \"\"\" if not isinstance(engine, Engine): raise TypeError(f\"Argument engine", "t def _as_first_epoch_completed(self, engine: Engine) -> None: self._event_handlers_timer.reset() def _as_last_epoch_completed(self,", "e in Events if e not in self.events_to_ignore ] +", "= [\"\"] header_sep_lst = [\"\"] line_length_lst = [-SPACING_SIZE] def add_column(padding:", "{})) @staticmethod def _compute_basic_stats(data: torch.Tensor) -> Dict[str, Union[str, float, Tuple[Union[float],", "= self._event_handlers_timer.value() i = engine.state.iteration - 1 self.event_handlers_times[Events.GET_BATCH_COMPLETED][i] = t", "in self.event_handlers_times[e]: headers.append(f\"{h} ({getattr(e, 'name', str(e))})\") cols.append(torch.tensor(self.event_handlers_times[e][h], dtype=torch.float32)) # Determine", "a csv file Args: output_path: file output path containing a", "0.4.6 \"\"\" events_to_ignore = [ Events.EXCEPTION_RAISED, Events.TERMINATE, Events.TERMINATE_SINGLE_EPOCH, Events.DATALOADER_STOP_ITERATION, ]", "m in zip(self._events, self._lmethods): engine.remove_event_handler(m, e) def attach(self, engine: Engine)", "if e not in self.events_to_ignore } # Setup all other", "e, m in zip(self._events, self._fmethods): engine.remove_event_handler(m, e) for e, m", "torch.arange(max_len, dtype=torch.float32) + 1 cols.insert(0, count_col) headers.insert(0, \"#\") # pad", "e = engine.state.epoch - 1 self.event_handlers_times[Events.EPOCH_COMPLETED][e] = t def _as_first_completed(self,", "to profile the handlers, events, data loading and data processing", "def write_results(self, output_path: str) -> None: \"\"\" Method to store", "code-block:: text ----------------------------------------------------------------- # processing_stats dataflow_stats training.<locals>.log_elapsed_time (EPOCH_COMPLETED) ... 1", "summary_format = \"{} took total {}s [min/index: {}, max/index: {},", "= self._event_handlers_timer.value() i = engine.state.iteration - 1 self.event_handlers_times[Events.ITERATION_STARTED][i] = t", "std] event_handler_stats = [ [ h, getattr(e, \"name\", str(e)), *compute_basic_stats(torch.tensor(self.event_handlers_times[e][h],", "for e in engine._allowed_events: for i, (func, args, kwargs) in", "profiler.write_results('path_to_dir/awesome_filename.csv') Examples: .. 
code-block:: text ----------------------------------------------------------------- # processing_stats dataflow_stats training.<locals>.log_elapsed_time", "type: Union[str, float] if len(data) > 0: min_index = (round(torch.min(data).item(),", "to write results as files\") processing_stats = torch.tensor(self.processing_times, dtype=torch.float32) dataflow_stats", "max([len(item[0]) for item in results]) + 4 # type: ignore[arg-type]", "line_length_lst[0] += padding + SPACING_SIZE add_column(handler_column_width, text_dir=\"<\") add_column(event_column_width, text_dir=\"<\") for", "= data[data > 0] total = round(torch.sum(data).item(), 5) if len(data)", "in zip(self._events, self._fmethods): engine._event_handlers[e].insert(0, (m, (engine,), {})) for e, m", "= Timer() self._event_handlers_timer = Timer() self.dataflow_times = torch.zeros(1) self.processing_times =", "for item in results]) + 4 # type: ignore[arg-type] DEFAULT_COLUMN_WIDTH", "Event Name Total(s) ----------------------------------------- ----------------------- -------------- run.<locals>.log_training_results EPOCH_COMPLETED 19.43245 run.<locals>.log_validation_results", "(round(torch.min(data).item(), 5), torch.argmin(data).item()) max_index = (round(torch.max(data).item(), 5), torch.argmax(data).item()) mean =", "- Events.ITERATION_COMPLETED: ['TerminateOnNan'] 0.20037 | 0.00006/866 | 0.00089/1943 | 0.00010", "run.<locals>.log_validation_results EPOCH_COMPLETED 2.55271 run.<locals>.log_time EPOCH_COMPLETED 0.00049 run.<locals>.log_intermediate_results EPOCH_COMPLETED 0.00106 run.<locals>.log_training_loss", "Events.ITERATION_STARTED: ['PiecewiseLinear'] 0.03482 | 0.00001/188 | 0.00018/679 | 0.00002 |", "{dataflow_stats} Event handlers: {total_time:.5f} - Events.STARTED: {STARTED_names} {STARTED} - Events.EPOCH_STARTED:", "tuple): return f\"{v[0]:.5f}/{v[1]}\" return f\"{v:.5f}\" def odict_to_str(d: Mapping) -> str:", "= self.dataflow_times event_started = self.event_handlers_times[Events.STARTED].repeat_interleave(self.total_num_iters) event_completed = self.event_handlers_times[Events.COMPLETED].repeat_interleave(self.total_num_iters) event_epoch_started =", "engine.state.iteration - 1 self.event_handlers_times[Events.ITERATION_COMPLETED][i] = t def _as_first_epoch_completed(self, engine: Engine)", "text ---------------------------------------------------- | Time profiling stats (in seconds): | ----------------------------------------------------", "mean, std] event_handler_stats = [ [ h, getattr(e, \"name\", str(e)),", "i = engine.state.iteration - 1 self.event_handlers_times[Events.GET_BATCH_COMPLETED][i] = t d =", "store the unaggregated profiling results to a csv file Args:", "hasattr(h, \"__qualname__\") else h.__class__.__name__ for (h, _, _) in engine._event_handlers[e]", "an engine to it profiler = BasicTimeProfiler() profiler.attach(trainer) @trainer.on(Events.EPOCH_COMPLETED) def", "{e: {h: [] for h in event_handlers_names[e]} for e in", "import functools from collections import OrderedDict from typing import Any,", "write_results(self, output_path: str) -> None: \"\"\" Method to store the", "self.event_handlers_times[e]: headers.append(f\"{h} ({getattr(e, 'name', str(e))})\") cols.append(torch.tensor(self.event_handlers_times[e][h], dtype=torch.float32)) # Determine maximum", "= HandlersTimeProfiler() profiler.attach(trainer) @trainer.on(Events.EPOCH_COMPLETED) def log_intermediate_results(): profiler.print_results(profiler.get_results()) trainer.run(dataloader, max_epochs=3) 
profiler.write_results('path_to_dir/time_profiling.csv')", "event_handlers_names[e]} for e in event_handlers_names} @staticmethod def _is_internal_handler(handler: Callable) ->", "log_intermediate_results(): profiler.print_results(profiler.get_results()) trainer.run(dataloader, max_epochs=3) profiler.write_results('path_to_dir/time_profiling.csv') .. versionadded:: 0.4.6 \"\"\" EVENT_FILTER_THESHOLD_TIME", "self._reset(self.event_handlers_names) for e in engine._allowed_events: for i, (func, args, kwargs)", "isinstance(engine, Engine): raise TypeError(f\"Argument engine should be ignite.engine.Engine, but given", "5) return [total, min_index, max_index, mean, std] event_handler_stats = [", "if not engine.has_event_handler(self._as_first_started): engine._event_handlers[Events.STARTED].insert(0, (self._as_first_started, (engine,), {})) @staticmethod def _compute_basic_stats(data:", "event_epoch_completed = self.event_handlers_times[Events.EPOCH_COMPLETED].repeat_interleave(iters_per_epoch) event_iter_started = self.event_handlers_times[Events.ITERATION_STARTED] event_iter_completed = self.event_handlers_times[Events.ITERATION_COMPLETED] event_batch_started", "len(data) > 0: min_index = (round(torch.min(data).item(), 5), torch.argmin(data).item()) max_index =", "= self._get_callable_name(handler) # filter profiled time if the handler was", "None: self.event_handlers_times[Events.STARTED][0] = self._event_handlers_timer.value() def _as_first_epoch_started(self, engine: Engine) -> None:", "_as_first_completed(self, engine: Engine) -> None: self._event_handlers_timer.reset() def _as_last_completed(self, engine: Engine)", "= max([len(item[0]) for item in results]) + 4 # type:", "sum( [ sum(self.event_handlers_times[e][h]) for e in self.event_handlers_times for h in", "None: self._event_handlers_timer.reset() def _as_last_get_batch_completed(self, engine: Engine) -> None: t =", "- 1 self.event_handlers_times[Events.EPOCH_COMPLETED][e] = t def _as_first_completed(self, engine: Engine) ->", "containing a filename .. code-block:: python profiler.write_results('path_to_dir/awesome_filename.csv') Examples: .. 
code-block::", "(engine,), {})) def get_results(self) -> List[List[Union[str, float]]]: \"\"\" Method to", "ignite.handlers import BasicTimeProfiler trainer = Engine(train_updater) # Create an object", "{type(engine)}\") if not engine.has_event_handler(self._as_first_started): engine._event_handlers[Events.STARTED].insert(0, (self._as_first_started, (engine,), {})) @staticmethod def", "self.event_handlers_times for h in self.event_handlers_times[e] ] event_handler_stats.append([\"Total\", \"\", total_eh_time, \"\",", "# Setup all other handlers: engine._event_handlers[Events.STARTED].append((self._as_last_started, (engine,), {})) for e,", "self._get_callable_name(h) for (h, _, _) in engine._event_handlers[e] if not self._is_internal_handler(h)", "v for k, v in results[\"event_handlers_stats\"].items() } others.update(results[\"event_handlers_names\"]) output_message =", "0.0001 def __init__(self) -> None: self._dataflow_timer = Timer() self._processing_timer =", "self._as_first_epoch_started, self._as_first_epoch_completed, self._as_first_iter_started, self._as_first_iter_completed, self._as_first_get_batch_started, self._as_first_get_batch_completed, self._as_first_completed, ] self._lmethods =", "run.<locals>.log_intermediate_results EPOCH_COMPLETED 0.00106 run.<locals>.log_training_loss ITERATION_COMPLETED 0.059 run.<locals>.log_time COMPLETED not triggered", "in results]) + 4 # type: ignore[arg-type] event_column_width = max([len(item[1])", "= self.processing_times dataflow_stats = self.dataflow_times event_started = self.event_handlers_times[Events.STARTED].repeat_interleave(self.total_num_iters) event_completed =", "= self._processing_timer.value() i = engine.state.iteration - 1 self.processing_times[i] = t", "not engine.has_event_handler(self._as_first_started): engine._event_handlers[Events.STARTED].insert(0, (self._as_first_started, (engine,), {})) @staticmethod def _compute_basic_stats(data: torch.Tensor)", "engine._event_handlers[e] if not self._is_internal_handler(h) ] for e in engine._allowed_events }", "-> bool: # checks whether the handler is internal return", "attach \"\"\" if not isinstance(engine, Engine): raise TypeError(f\"Argument engine should", "def _as_first_epoch_started(self, engine: Engine) -> None: self._event_handlers_timer.reset() def _as_last_epoch_started(self, engine:", "event_epoch_started, event_epoch_completed, event_iter_started, event_iter_completed, event_batch_started, event_batch_completed, ], dim=1, ).numpy() results_df", "from collections import OrderedDict from typing import Any, Callable, Dict,", "self._as_first_iter_completed, self._as_first_get_batch_started, self._as_first_get_batch_completed, self._as_first_completed, ] self._lmethods = [ self._as_last_epoch_started, self._as_last_epoch_completed,", "back to original handler after profiling setattr(_timeit_handler, \"_profiler_original\", handler) return", "Callable) -> str: # get name of the callable handler", "= (round(torch.max(data).item(), 5), torch.argmax(data).item()) mean = round(torch.mean(data).item(), 5) if len(data)", "# type: ignore[misc] row[4] = \"{}/{}\".format(*row[4]) # type: ignore[misc] append(row_format.format(*row))", "+= padding + SPACING_SIZE add_column(handler_column_width, text_dir=\"<\") add_column(event_column_width, text_dir=\"<\") for _", "engine.state.epoch_length is None: raise ValueError( \"As epoch_length is not set,", "not in repr(h) # avoid adding internal handlers into output", "in engine._event_handlers: for i, (func, args, kwargs) in 
enumerate(engine._event_handlers[e]): if", ".. code-block:: python results = profiler.get_results() \"\"\" total_eh_time = sum(", "_timeit_dataflow(self) -> None: # handler used for profiling dataflow times", "results to a csv file Args: output_path: file output path", "= 2 row_format_lst = [\"\"] header_sep_lst = [\"\"] line_length_lst =", "torch.zeros(total_num_iters) self.event_handlers_times = { Events.STARTED: torch.zeros(1), Events.COMPLETED: torch.zeros(1), Events.EPOCH_STARTED: torch.zeros(num_epochs),", "List[Tuple[str, Union[str, float, Tuple[Union[float], Union[float]]]]] if len(data) > 1: out", "\"dataflow_stats\"] for e in self.event_handlers_times: for h in self.event_handlers_times[e]: headers.append(f\"{h}", "1 processing_stats = self.processing_times dataflow_stats = self.dataflow_times event_started = self.event_handlers_times[Events.STARTED].repeat_interleave(self.total_num_iters)", "took total 11.29543s [min/index: 0.00393s/1875, max/index: 0.00784s/0, mean: 0.00602s, std:", "event: EventEnum) -> Callable: @functools.wraps(handler) def _timeit_handler(*args: Any, **kwargs: Any)", "self._dataflow_timer.value() self.dataflow_times[i] = d self._dataflow_timer.reset() def _as_first_iter_started(self, engine: Engine) ->", "= Timer() self._processing_timer = Timer() self._event_handlers_timer = Timer() self.dataflow_times =", "in self.event_handlers_times for h in self.event_handlers_times[e] ] ) total_eh_time =", "None: self._dataflow_timer = Timer() self._processing_timer = Timer() self._event_handlers_timer = Timer()", "dataflow_stats training.<locals>.log_elapsed_time (EPOCH_COMPLETED) ... 1 0.00003 0.252387 0.125676 2 0.00029", "{EPOCH_STARTED_names} {EPOCH_STARTED} - Events.ITERATION_STARTED: {ITERATION_STARTED_names} {ITERATION_STARTED} - Events.ITERATION_COMPLETED: {ITERATION_COMPLETED_names} {ITERATION_COMPLETED}", "in results]) + 4 # type: ignore[arg-type] DEFAULT_COLUMN_WIDTH = 14", "stats (in seconds): | ---------------------------------------------------- total | min/index | max/index", "= self._dataflow_timer.value() self.dataflow_times.append(t) def _reset(self, event_handlers_names: Mapping[EventEnum, List[str]]) -> None:", "the handlers, data loading and data processing times. Custom events", "[ \"Handler\", \"Event Name\", \"Total(s)\", \"Min(s)/IDX\", \"Max(s)/IDX\", \"Mean(s)\", \"Std(s)\", ]", "\"BasicTimeProfiler.\" not in repr(h) # avoid adding internal handlers into", "data[data > 0] out = [ (\"total\", torch.sum(data).item() if len(data)", "Events.ITERATION_COMPLETED: torch.zeros(total_num_iters), Events.GET_BATCH_COMPLETED: torch.zeros(total_num_iters), Events.GET_BATCH_STARTED: torch.zeros(total_num_iters), } def _as_first_started(self, engine:", "the instance of Engine to attach \"\"\" if not isinstance(engine,", "engine._event_handlers[e] if \"BasicTimeProfiler.\" not in repr(h) # avoid adding internal", "= self._event_handlers_timer.value() e = engine.state.epoch - 1 self.event_handlers_times[Events.EPOCH_COMPLETED][e] = t", "# compute on non-zero data: data = data[data > 0]", "profiler.write_results('path_to_dir/time_profiling.csv') .. 
versionadded:: 0.4.6 \"\"\" EVENT_FILTER_THESHOLD_TIME = 0.0001 def __init__(self)", "Create an object of the profiler and attach an engine", "{STARTED} - Events.EPOCH_STARTED: {EPOCH_STARTED_names} {EPOCH_STARTED} - Events.ITERATION_STARTED: {ITERATION_STARTED_names} {ITERATION_STARTED} -", "for e, m in zip(self._events, self._lmethods): engine._event_handlers[e].append((m, (engine,), {})) #", "self._processing_timer.reset) engine._event_handlers[Events.ITERATION_COMPLETED].insert(0, (self._timeit_processing, (), {})) # dataflow timer engine.add_event_handler(Events.GET_BATCH_STARTED, self._dataflow_timer.reset)", "def _detach_profiler_handlers(self, engine: Engine) -> None: # reverts handlers to", "type: ignore[misc] row[4] = \"{}/{}\".format(*row[4]) # type: ignore[misc] append(row_format.format(*row)) append(header_sep)", "str: \"\"\" Method to print the aggregated results from the", "Events.COMPLETED: [] not yet triggered \"\"\" def to_str(v: Union[str, tuple])", "torch.arange(self.max_epochs, dtype=torch.float32).repeat_interleave(iters_per_epoch) + 1 iterations = torch.arange(self.total_num_iters, dtype=torch.float32) + 1", "headers.insert(0, \"#\") # pad all tensors to have same length", "run.<locals>.log_training_results EPOCH_COMPLETED 19.43245 run.<locals>.log_validation_results EPOCH_COMPLETED 2.55271 run.<locals>.log_time EPOCH_COMPLETED 0.00049 run.<locals>.log_intermediate_results", "| 0.00002 | 0.00001 - Events.ITERATION_COMPLETED: ['TerminateOnNan'] 0.20037 | 0.00006/866", "| 0.02693 Event handlers: 2.82721 - Events.STARTED: [] 0.00000 -", "- Events.EPOCH_STARTED: [] 0.00006 | 0.00000/0 | 0.00000/17 | 0.00000", "(\"dataflow_stats\", self._compute_basic_stats(self.dataflow_times)), (\"event_handlers_stats\", event_handlers_stats), ( \"event_handlers_names\", {str(e.name).replace(\".\", \"_\") + \"_names\":", "def compute_basic_stats( times: Union[Sequence, torch.Tensor] ) -> List[Union[str, float, Tuple[Union[str,", "0.00790 - Events.COMPLETED: [] not yet triggered \"\"\" def to_str(v:", "self._event_handlers_timer = Timer() self.dataflow_times = torch.zeros(1) self.processing_times = torch.zeros(1) self.event_handlers_times", "(\"std\", torch.std(data).item()), ] return OrderedDict(out) def get_results(self) -> Dict[str, Dict[str,", "5), torch.argmin(data).item()) max_index = (round(torch.max(data).item(), 5), torch.argmax(data).item()) mean = round(torch.mean(data).item(),", "\"\"\"Attach HandlersTimeProfiler to the given engine. 
Args: engine: the instance", "Engine) -> None: self._event_handlers_timer.reset() def _as_last_epoch_started(self, engine: Engine) -> None:", "v in results[\"event_handlers_stats\"].items() } others.update(results[\"event_handlers_names\"]) output_message = \"\"\" ---------------------------------------------------- |", "from typing import Any, Callable, Dict, List, Mapping, Sequence, Tuple,", "self._is_internal_handler(func): engine._event_handlers[e][i] = (self._create_wrapped_handler(func, e), args, kwargs) # processing timer", "engine.state.iteration - 1 self.processing_times[i] = t self._event_handlers_timer.reset() def _as_last_iter_completed(self, engine:", "self.event_handlers_times: for h in self.event_handlers_times[e]: headers.append(f\"{h} ({getattr(e, 'name', str(e))})\") cols.append(torch.tensor(self.event_handlers_times[e][h],", "= [] def append(s: str) -> None: result.append(s) result.append(\"\\n\") result.append(\"\\n\")", "= [ Events.EXCEPTION_RAISED, Events.TERMINATE, Events.TERMINATE_SINGLE_EPOCH, Events.DATALOADER_STOP_ITERATION, ] def __init__(self) ->", "self._as_last_iter_started, self._as_last_iter_completed, self._as_last_get_batch_started, self._as_last_get_batch_completed, self._as_last_completed, ] def _reset(self, num_epochs: int,", "(\" \" * SPACING_SIZE) line_length_lst[0] += padding + SPACING_SIZE add_column(handler_column_width,", "self.dataflow_times.append(t) def _reset(self, event_handlers_names: Mapping[EventEnum, List[str]]) -> None: # reset", "# Let's go self._event_handlers_timer.reset() def _as_last_started(self, engine: Engine) -> None:", "an engine to it profiler = HandlersTimeProfiler() profiler.attach(trainer) @trainer.on(Events.EPOCH_COMPLETED) def", "float, Tuple[Union[float], Union[float]]]]: # compute on non-zero data: data =", "... 
Handler Event Name Total(s) ----------------------------------------- ----------------------- -------------- run.<locals>.log_training_results EPOCH_COMPLETED", "e in Events if e not in self.events_to_ignore } #", "\"Mean(s)\", \"Std(s)\", ] # Have to use a list because", "[\"\"] line_length_lst = [-SPACING_SIZE] def add_column(padding: int, text_dir: str =", "engine: Engine) -> None: self._event_handlers_timer.reset() def _as_last_iter_started(self, engine: Engine) ->", "type: ignore[arg-type] else: if engine.state.epoch_length is None: raise ValueError( \"As", "+ [(\"total_time\", total_eh_time)] # type: ignore[list-item] ) return OrderedDict( [", "return OrderedDict( [ (\"processing_stats\", self._compute_basic_stats(self.processing_times)), (\"dataflow_stats\", self._compute_basic_stats(self.dataflow_times)), (\"event_handlers_stats\", event_handlers_stats), (", "cols] results_dump = torch.stack(cols, dim=1).numpy() results_df = pd.DataFrame(data=results_dump, columns=headers) results_df.to_csv(output_path,", ") # type: Union[int, torch.Tensor] event_handlers_stats = dict( [ (str(e.name).replace(\".\",", "took total {}s [min/index: {}, max/index: {}, mean: {}s, std:", "HandlersTimeProfiler() profiler.attach(trainer) @trainer.on(Events.EPOCH_COMPLETED) def log_intermediate_results(): profiler.print_results(profiler.get_results()) trainer.run(dataloader, max_epochs=3) profiler.write_results('path_to_dir/time_profiling.csv') ..", "add_column(event_column_width, text_dir=\"<\") for _ in headers[2:]: add_column(DEFAULT_COLUMN_WIDTH) row_format = row_format_lst[0]", ") print(output_message) return output_message class HandlersTimeProfiler: \"\"\" HandlersTimeProfiler can be", "| 0.14977/13 | 0.12893 | 0.00790 - Events.COMPLETED: [] not", "the profiler .. code-block:: python profiler.print_results(results) Examples: .. code-block:: text", "not in self.events_to_ignore ] + [(\"total_time\", total_eh_time)] # type: ignore[list-item]", "# type: ignore[misc] row[4] = \"{}s/{}\".format(*row[4]) # type: ignore[misc] del", "e in self.event_handlers_times for h in self.event_handlers_times[e] ] event_handler_stats.append([\"Total\", \"\",", "\"Event_GET_BATCH_COMPLETED\", ], ) results_df.to_csv(output_path, index=False) @staticmethod def print_results(results: Dict) ->", "set trainer.run(..., epoch_length=epoch_length) in order to fix this.\" ) num_iters_per_epoch", "engine._event_handlers[Events.STARTED].append((self._as_last_started, (engine,), {})) for e, m in zip(self._events, self._fmethods): engine._event_handlers[e].insert(0,", "total_num_iters: int) -> None: self.dataflow_times = torch.zeros(total_num_iters) self.processing_times = torch.zeros(total_num_iters)", "[ Events.EPOCH_STARTED, Events.EPOCH_COMPLETED, Events.ITERATION_STARTED, Events.ITERATION_COMPLETED, Events.GET_BATCH_STARTED, Events.GET_BATCH_COMPLETED, Events.COMPLETED, ] self._fmethods", "-> str: if isinstance(v, str): return v elif isinstance(v, tuple):", "dataflow_stats = torch.tensor(self.dataflow_times, dtype=torch.float32) cols = [processing_stats, dataflow_stats] headers =", "data loading and data processing times. 
Custom events are also", "@staticmethod def _compute_basic_stats(data: torch.Tensor) -> Dict[str, Union[str, float, Tuple[Union[float], Union[float]]]]:", "self._compute_basic_stats(self.dataflow_times)), (\"event_handlers_stats\", event_handlers_stats), ( \"event_handlers_names\", {str(e.name).replace(\".\", \"_\") + \"_names\": v", "get_results(self) -> Dict[str, Dict[str, Any]]: \"\"\" Method to fetch the", "DEFAULT_COLUMN_WIDTH = 14 headers = [ \"Handler\", \"Event Name\", \"Total(s)\",", "----------------------- -------------- ... Handler Event Name Total(s) ----------------------------------------- ----------------------- --------------", "total 11.29543s [min/index: 0.00393s/1875, max/index: 0.00784s/0, mean: 0.00602s, std: 0.00034s]", "{}s]\" for row in results[-2:]: row[3] = \"{}s/{}\".format(*row[3]) # type:", "{EPOCH_COMPLETED_names} {EPOCH_COMPLETED} - Events.COMPLETED: {COMPLETED_names} {COMPLETED} \"\"\".format( processing_stats=odict_to_str(results[\"processing_stats\"]), dataflow_stats=odict_to_str(results[\"dataflow_stats\"]), **others,", "index=False) @staticmethod def print_results(results: List[List[Union[str, float]]]) -> None: \"\"\" Method", "profiling self.event_handlers_names = { e: [ self._get_callable_name(h) for (h, _,", "kwargs) in enumerate(engine._event_handlers[e]): if not self._is_internal_handler(func): engine._event_handlers[e][i] = (self._create_wrapped_handler(func, e),", "0.252387 0.125676 1.0 2.0 0.00029 0.252342 0.125123 \"\"\" try: import", "mode=\"constant\", value=0) for x in cols] results_dump = torch.stack(cols, dim=1).numpy()", "# Remove added handlers: engine.remove_event_handler(self._as_last_started, Events.STARTED) for e, m in", "# type: Dict[EventEnum, torch.Tensor] self._events = [ Events.EPOCH_STARTED, Events.EPOCH_COMPLETED, Events.ITERATION_STARTED,", "adding internal handlers into output ] for e in Events", "Args: engine: the instance of Engine to attach \"\"\" if", "----------------------- -------------- run.<locals>.log_training_results EPOCH_COMPLETED 19.43245 run.<locals>.log_validation_results EPOCH_COMPLETED 2.55271 run.<locals>.log_time EPOCH_COMPLETED", "print the aggregated results from the profiler Args: results: the", "+ SPACING_SIZE add_column(handler_column_width, text_dir=\"<\") add_column(event_column_width, text_dir=\"<\") for _ in headers[2:]:", "in results[:-3]: # format min/idx and max/idx row[3] = \"{}/{}\".format(*row[3])", "self._event_handlers_timer.value() # Remove added handlers: engine.remove_event_handler(self._as_last_started, Events.STARTED) for e, m", "raise ValueError( \"As epoch_length is not set, we can not", "in results[\"event_handlers_stats\"].items() } others.update(results[\"event_handlers_names\"]) output_message = \"\"\" ---------------------------------------------------- | Time", "getattr(e, \"name\", str(e)), *compute_basic_stats(torch.tensor(self.event_handlers_times[e][h], dtype=torch.float32)), ] for e in self.event_handlers_times", "// self.max_epochs epochs = torch.arange(self.max_epochs, dtype=torch.float32).repeat_interleave(iters_per_epoch) + 1 iterations =", "dataflow_stats Event_STARTED ... 1.0 1.0 0.00003 0.252387 0.125676 1.0 2.0", "epoch_length=epoch_length) in order to fix this.\" ) num_iters_per_epoch = engine.state.epoch_length", "from the profiler .. code-block:: python profiler.print_results(results) Examples: .. 
code-block::", "results_df.to_csv(output_path, index=False) @staticmethod def print_results(results: Dict) -> str: \"\"\" Method", "engine: Engine) -> None: t = self._event_handlers_timer.value() i = engine.state.iteration", "handlers: engine.remove_event_handler(self._as_last_started, Events.STARTED) for e, m in zip(self._events, self._fmethods): engine.remove_event_handler(m,", "for _ in headers[2:]: add_column(DEFAULT_COLUMN_WIDTH) row_format = row_format_lst[0] header_sep =", "\"dataflow_stats\", \"Event_STARTED\", \"Event_COMPLETED\", \"Event_EPOCH_STARTED\", \"Event_EPOCH_COMPLETED\", \"Event_ITERATION_STARTED\", \"Event_ITERATION_COMPLETED\", \"Event_GET_BATCH_STARTED\", \"Event_GET_BATCH_COMPLETED\", ],", "self.event_handlers_times[Events.COMPLETED][0] = self._event_handlers_timer.value() # Remove added handlers: engine.remove_event_handler(self._as_last_started, Events.STARTED) for", "all tensors to have same length cols = [torch.nn.functional.pad(x, pad=(0,", "text_dir: str = \">\") -> None: row_format_lst[0] += \"{: \"", "pad=(0, max_len - x.numel()), mode=\"constant\", value=0) for x in cols]", "= { k: odict_to_str(v) if isinstance(v, OrderedDict) else v for", "-> None: # handler used for profiling dataflow times t", "Tuple, Union, cast import torch from ignite.engine import Engine, EventEnum,", "[\"processing_stats\", \"dataflow_stats\"] for e in self.event_handlers_times: for h in self.event_handlers_times[e]:", "Any) -> None: self._event_handlers_timer.reset() handler(*args, **kwargs) t = self._event_handlers_timer.value() hname", "| 0.00010 | 0.00003 - Events.EPOCH_COMPLETED: ['empty_cuda_cache', 'training.<locals>.log_elapsed_time', ] 2.57860", "filter if not hasattr(handler, \"_parent\") or t >= self.EVENT_FILTER_THESHOLD_TIME: self.event_handlers_times[event][hname].append(t)", "{ k: odict_to_str(v) if isinstance(v, OrderedDict) else v for k,", "headers = [ \"Handler\", \"Event Name\", \"Total(s)\", \"Min(s)/IDX\", \"Max(s)/IDX\", \"Mean(s)\",", "= \"{} took total {}s [min/index: {}, max/index: {}, mean:", "revert back the wrapped handlers with original handlers at the", "self._compute_basic_stats(self.event_handlers_times[e])) for e in Events if e not in self.events_to_ignore", "profiler.print_results(profiler.get_results()) trainer.run(dataloader, max_epochs=3) profiler.write_results('path_to_dir/time_profiling.csv') .. versionadded:: 0.4.6 \"\"\" events_to_ignore =", "0.20037 | 0.00006/866 | 0.00089/1943 | 0.00010 | 0.00003 -", "for e in engine._event_handlers: for i, (func, args, kwargs) in", "-> None: self._event_handlers_timer.reset() def _as_last_epoch_started(self, engine: Engine) -> None: t", "case.\" \"Please, set trainer.run(..., epoch_length=epoch_length) in order to fix this.\"", "-> None: \"\"\" Method to print the aggregated results from", "0.00866s, std: 0.00113s] \"\"\" # adopted implementation of torch.autograd.profiler.build_table handler_column_width", ".. 
versionadded:: 0.4.6 \"\"\" events_to_ignore = [ Events.EXCEPTION_RAISED, Events.TERMINATE, Events.TERMINATE_SINGLE_EPOCH,", "self._as_last_completed, ] def _reset(self, num_epochs: int, total_num_iters: int) -> None:", "BasicTimeProfiler in this case.\" \"Please, set trainer.run(..., epoch_length=epoch_length) in order", "output_path: str) -> None: \"\"\" Method to store the unaggregated", "| min/index | max/index | mean | std Processing function:", "for e, m in zip(self._events, self._lmethods): engine.remove_event_handler(m, e) def attach(self,", "round(torch.mean(data).item(), 5) if len(data) > 1: std = round(torch.std(data).item(), 5)", "_as_first_started(self, engine: Engine) -> None: if hasattr(engine.state.dataloader, \"__len__\"): num_iters_per_epoch =", "event_handlers_stats), ( \"event_handlers_names\", {str(e.name).replace(\".\", \"_\") + \"_names\": v for e,", "List[Union[str, float, Tuple[Union[str, float], Union[str, float]]]]: data = torch.as_tensor(times, dtype=torch.float32)", "Custom events are also profiled by this profiler Examples: ..", "if len(data) > 1: std = round(torch.std(data).item(), 5) return [total,", "use BasicTimeProfiler in this case.\" \"Please, set trainer.run(..., epoch_length=epoch_length) in", "code-block:: python profiler.print_results(results) Examples: .. code-block:: text ---------------------------------------------------- | Time", "None: self.dataflow_times = torch.zeros(total_num_iters) self.processing_times = torch.zeros(total_num_iters) self.event_handlers_times = {", "mean | std Processing function: {processing_stats} Dataflow: {dataflow_stats} Event handlers:", "for e in event_handlers_names} @staticmethod def _is_internal_handler(handler: Callable) -> bool:", "self._processing_timer.reset() def _as_first_iter_completed(self, engine: Engine) -> None: t = self._processing_timer.value()", "1 self.event_handlers_times[Events.EPOCH_COMPLETED][e] = t def _as_first_completed(self, engine: Engine) -> None:", "0.26905/0 | 0.07730 | 0.01258 Dataflow: 6.11384 | 0.00008/1935 |", "run.<locals>.log_time COMPLETED not triggered ----------------------------------------- ----------------------- -------------- Total 22.04571 -----------------------------------------", "*compute_basic_stats(self.dataflow_times)]) return event_handler_stats def write_results(self, output_path: str) -> None: \"\"\"", "processing times. Custom events are also profiled by this profiler", "Engine) -> None: self._event_handlers_timer.reset() self._dataflow_timer.reset() def _as_last_get_batch_started(self, engine: Engine) ->", "{COMPLETED} \"\"\".format( processing_stats=odict_to_str(results[\"processing_stats\"]), dataflow_stats=odict_to_str(results[\"dataflow_stats\"]), **others, ) print(output_message) return output_message class", "+ \"_names\": v for e, v in self.event_handlers_names.items()}, ), ]", "\"_profiler_original\", handler) return _timeit_handler def _timeit_processing(self) -> None: # handler", "max/index: 0.01129s/937, mean: 0.00866s, std: 0.00113s] \"\"\" # adopted implementation", "results as files\") processing_stats = torch.tensor(self.processing_times, dtype=torch.float32) dataflow_stats = torch.tensor(self.dataflow_times,", "is Py3 only... SPACING_SIZE = 2 row_format_lst = [\"\"] header_sep_lst", "{})) for e, m in zip(self._events, self._lmethods): engine._event_handlers[e].append((m, (engine,), {}))", "157.46292 | 0.01452/1501 | 0.26905/0 | 0.07730 | 0.01258 Dataflow:", "data processing times. Examples: .. 
code-block:: python from ignite.handlers import", "self.event_handlers_times[Events.ITERATION_STARTED] event_iter_completed = self.event_handlers_times[Events.ITERATION_COMPLETED] event_batch_started = self.event_handlers_times[Events.GET_BATCH_STARTED] event_batch_completed = self.event_handlers_times[Events.GET_BATCH_COMPLETED]", "@staticmethod def print_results(results: List[List[Union[str, float]]]) -> None: \"\"\" Method to", "= [ self._as_first_epoch_started, self._as_first_epoch_completed, self._as_first_iter_started, self._as_first_iter_completed, self._as_first_get_batch_started, self._as_first_get_batch_completed, self._as_first_completed, ]", "profiler and attach an engine to it profiler = BasicTimeProfiler()", "1 cols.insert(0, count_col) headers.insert(0, \"#\") # pad all tensors to", "_as_last_epoch_started(self, engine: Engine) -> None: t = self._event_handlers_timer.value() e =", "event_column_width = max([len(item[1]) for item in results]) + 4 #", "h in self.event_handlers_times[e]: headers.append(f\"{h} ({getattr(e, 'name', str(e))})\") cols.append(torch.tensor(self.event_handlers_times[e][h], dtype=torch.float32)) #", "Callable: @functools.wraps(handler) def _timeit_handler(*args: Any, **kwargs: Any) -> None: self._event_handlers_timer.reset()", "# type: Union[str, float] std = \"None\" # type: Union[str,", "= torch.zeros(total_num_iters) self.event_handlers_times = { Events.STARTED: torch.zeros(1), Events.COMPLETED: torch.zeros(1), Events.EPOCH_STARTED:", "\"\", \"\"]) event_handler_stats.append([\"Processing\", \"None\", *compute_basic_stats(self.processing_times)]) event_handler_stats.append([\"Dataflow\", \"None\", *compute_basic_stats(self.dataflow_times)]) return event_handler_stats", "= cast(int, engine.state.max_epochs) self.total_num_iters = self.max_epochs * num_iters_per_epoch self._reset(self.max_epochs, self.total_num_iters)", "\"__qualname__\", handler.__class__.__name__) def _create_wrapped_handler(self, handler: Callable, event: EventEnum) -> Callable:", "handler is internal return any(n in repr(handler) for n in", "torch.zeros(1) self.event_handlers_times = {} # type: Dict[EventEnum, torch.Tensor] self._events =", "line_length_lst = [-SPACING_SIZE] def add_column(padding: int, text_dir: str = \">\")", "... 1.0 1.0 0.00003 0.252387 0.125676 1.0 2.0 0.00029 0.252342", "0.00000 - Events.ITERATION_STARTED: ['PiecewiseLinear'] 0.03482 | 0.00001/188 | 0.00018/679 |", "results_df = pd.DataFrame(data=results_dump, columns=headers) results_df.to_csv(output_path, index=False) @staticmethod def print_results(results: List[List[Union[str,", "event_batch_started, event_batch_completed, ], dim=1, ).numpy() results_df = pd.DataFrame( data=results_dump, columns=[", "[] # type: List[float] self.event_handlers_times = {} # type: Dict[EventEnum,", "if not isinstance(engine, Engine): raise TypeError(f\"Argument engine should be ignite.engine.Engine,", "0.11529/0 | 0.14977/13 | 0.12893 | 0.00790 - Events.COMPLETED: []", "results]) + 4 # type: ignore[arg-type] DEFAULT_COLUMN_WIDTH = 14 headers", "max/index | mean | std Processing function: {processing_stats} Dataflow: {dataflow_stats}", "\"not triggered\" # type: Union[str, float] min_index = (\"None\", \"None\")", "after the engine is run .. 
import functools
from collections import OrderedDict
from typing import Any, Callable, Dict, List, Mapping, Sequence, Tuple, Union, cast

import torch

from ignite.engine import Engine, EventEnum, Events
from ignite.handlers.timing import Timer


class BasicTimeProfiler:
    """
    BasicTimeProfiler can be used to profile the handlers,
    events, data loading and data processing times.

    Examples:
        .. code-block:: python

            from ignite.handlers import BasicTimeProfiler

            trainer = Engine(train_updater)

            # Create an object of the profiler and attach an engine to it
            profiler = BasicTimeProfiler()
            profiler.attach(trainer)

            @trainer.on(Events.EPOCH_COMPLETED)
            def log_intermediate_results():
                profiler.print_results(profiler.get_results())

            trainer.run(dataloader, max_epochs=3)

            profiler.write_results('path_to_dir/time_profiling.csv')

    .. versionadded:: 0.4.6
    """

    events_to_ignore = [
        Events.EXCEPTION_RAISED,
        Events.TERMINATE,
        Events.TERMINATE_SINGLE_EPOCH,
        Events.DATALOADER_STOP_ITERATION,
    ]

    def __init__(self) -> None:
        self._dataflow_timer = Timer()
        self._processing_timer = Timer()
        self._event_handlers_timer = Timer()

        self.dataflow_times = torch.zeros(1)
        self.processing_times = torch.zeros(1)
        self.event_handlers_times = {}  # type: Dict[EventEnum, torch.Tensor]

        self._events = [
            Events.EPOCH_STARTED,
            Events.EPOCH_COMPLETED,
            Events.ITERATION_STARTED,
            Events.ITERATION_COMPLETED,
            Events.GET_BATCH_STARTED,
            Events.GET_BATCH_COMPLETED,
            Events.COMPLETED,
        ]
        self._fmethods = [
            self._as_first_epoch_started,
            self._as_first_epoch_completed,
            self._as_first_iter_started,
            self._as_first_iter_completed,
            self._as_first_get_batch_started,
            self._as_first_get_batch_completed,
            self._as_first_completed,
        ]
        self._lmethods = [
            self._as_last_epoch_started,
            self._as_last_epoch_completed,
            self._as_last_iter_started,
            self._as_last_iter_completed,
            self._as_last_get_batch_started,
            self._as_last_get_batch_completed,
            self._as_last_completed,
        ]

    def _reset(self, num_epochs: int, total_num_iters: int) -> None:
        self.dataflow_times = torch.zeros(total_num_iters)
        self.processing_times = torch.zeros(total_num_iters)
        self.event_handlers_times = {
            Events.STARTED: torch.zeros(1),
            Events.COMPLETED: torch.zeros(1),
            Events.EPOCH_STARTED: torch.zeros(num_epochs),
            Events.EPOCH_COMPLETED: torch.zeros(num_epochs),
            Events.ITERATION_STARTED: torch.zeros(total_num_iters),
            Events.ITERATION_COMPLETED: torch.zeros(total_num_iters),
            Events.GET_BATCH_COMPLETED: torch.zeros(total_num_iters),
            Events.GET_BATCH_STARTED: torch.zeros(total_num_iters),
        }

    def _as_first_started(self, engine: Engine) -> None:
        if hasattr(engine.state.dataloader, "__len__"):
            num_iters_per_epoch = len(engine.state.dataloader)  # type: ignore[arg-type]
        else:
            if engine.state.epoch_length is None:
                raise ValueError(
                    "As epoch_length is not set, we can not use BasicTimeProfiler in this case."
                    "Please, set trainer.run(..., epoch_length=epoch_length) in order to fix this."
                )
            num_iters_per_epoch = engine.state.epoch_length

        self.max_epochs = cast(int, engine.state.max_epochs)
        self.total_num_iters = self.max_epochs * num_iters_per_epoch
        self._reset(self.max_epochs, self.total_num_iters)

        self.event_handlers_names = {
            e: [
                h.__qualname__ if hasattr(h, "__qualname__") else h.__class__.__name__
                for (h, _, _) in engine._event_handlers[e]
                if "BasicTimeProfiler." not in repr(h)  # avoid adding internal handlers into output
            ]
            for e in Events
            if e not in self.events_to_ignore
        }

        # Setup all other handlers:
        engine._event_handlers[Events.STARTED].append((self._as_last_started, (engine,), {}))

        for e, m in zip(self._events, self._fmethods):
            engine._event_handlers[e].insert(0, (m, (engine,), {}))

        for e, m in zip(self._events, self._lmethods):
            engine._event_handlers[e].append((m, (engine,), {}))

        # Let's go
        self._event_handlers_timer.reset()

    def _as_last_started(self, engine: Engine) -> None:
        self.event_handlers_times[Events.STARTED][0] = self._event_handlers_timer.value()

    def _as_first_epoch_started(self, engine: Engine) -> None:
        self._event_handlers_timer.reset()

    def _as_last_epoch_started(self, engine: Engine) -> None:
        t = self._event_handlers_timer.value()
        e = engine.state.epoch - 1
        self.event_handlers_times[Events.EPOCH_STARTED][e] = t

    def _as_first_get_batch_started(self, engine: Engine) -> None:
        self._event_handlers_timer.reset()
        self._dataflow_timer.reset()

    def _as_last_get_batch_started(self, engine: Engine) -> None:
        t = self._event_handlers_timer.value()
        i = engine.state.iteration - 1
        self.event_handlers_times[Events.GET_BATCH_STARTED][i] = t

    def _as_first_get_batch_completed(self, engine: Engine) -> None:
        self._event_handlers_timer.reset()

    def _as_last_get_batch_completed(self, engine: Engine) -> None:
        t = self._event_handlers_timer.value()
        i = engine.state.iteration - 1
        self.event_handlers_times[Events.GET_BATCH_COMPLETED][i] = t

        d = self._dataflow_timer.value()
        self.dataflow_times[i] = d

        self._dataflow_timer.reset()

    def _as_first_iter_started(self, engine: Engine) -> None:
        self._event_handlers_timer.reset()

    def _as_last_iter_started(self, engine: Engine) -> None:
        t = self._event_handlers_timer.value()
        i = engine.state.iteration - 1
        self.event_handlers_times[Events.ITERATION_STARTED][i] = t

        self._processing_timer.reset()

    def _as_first_iter_completed(self, engine: Engine) -> None:
        t = self._processing_timer.value()
        i = engine.state.iteration - 1
        self.processing_times[i] = t

        self._event_handlers_timer.reset()

    def _as_last_iter_completed(self, engine: Engine) -> None:
        t = self._event_handlers_timer.value()
        i = engine.state.iteration - 1
        self.event_handlers_times[Events.ITERATION_COMPLETED][i] = t

    def _as_first_epoch_completed(self, engine: Engine) -> None:
        self._event_handlers_timer.reset()

    def _as_last_epoch_completed(self, engine: Engine) -> None:
        t = self._event_handlers_timer.value()
        e = engine.state.epoch - 1
        self.event_handlers_times[Events.EPOCH_COMPLETED][e] = t

    def _as_first_completed(self, engine: Engine) -> None:
        self._event_handlers_timer.reset()

    def _as_last_completed(self, engine: Engine) -> None:
        self.event_handlers_times[Events.COMPLETED][0] = self._event_handlers_timer.value()

        # Remove added handlers:
        engine.remove_event_handler(self._as_last_started, Events.STARTED)

        for e, m in zip(self._events, self._fmethods):
            engine.remove_event_handler(m, e)

        for e, m in zip(self._events, self._lmethods):
            engine.remove_event_handler(m, e)

    def attach(self, engine: Engine) -> None:
        """Attach BasicTimeProfiler to the given engine.

        Args:
            engine: the instance of Engine to attach
        """
        if not isinstance(engine, Engine):
            raise TypeError(f"Argument engine should be ignite.engine.Engine, but given {type(engine)}")

        if not engine.has_event_handler(self._as_first_started):
            engine._event_handlers[Events.STARTED].insert(0, (self._as_first_started, (engine,), {}))

    @staticmethod
    def _compute_basic_stats(data: torch.Tensor) -> Dict[str, Union[str, float, Tuple[Union[float], Union[float]]]]:
        # compute on non-zero data:
        data = data[data > 0]
        out = [
            ("total", torch.sum(data).item() if len(data) > 0 else "not yet triggered")
        ]  # type: List[Tuple[str, Union[str, float, Tuple[Union[float], Union[float]]]]]
        if len(data) > 1:
            out += [
                ("min/index", (torch.min(data).item(), torch.argmin(data).item())),
                ("max/index", (torch.max(data).item(), torch.argmax(data).item())),
                ("mean", torch.mean(data).item()),
                ("std", torch.std(data).item()),
            ]
        return OrderedDict(out)

    def get_results(self) -> Dict[str, Dict[str, Any]]:
        """
        Method to fetch the aggregated profiler results after the engine is run

        .. code-block:: python

            results = profiler.get_results()
        """
        total_eh_time = sum(
            [(self.event_handlers_times[e]).sum() for e in Events if e not in self.events_to_ignore]
        )  # type: Union[int, torch.Tensor]
        event_handlers_stats = dict(
            [
                (str(e.name).replace(".", "_"), self._compute_basic_stats(self.event_handlers_times[e]))
                for e in Events
                if e not in self.events_to_ignore
            ]
            + [("total_time", total_eh_time)]  # type: ignore[list-item]
        )

        return OrderedDict(
            [
                ("processing_stats", self._compute_basic_stats(self.processing_times)),
                ("dataflow_stats", self._compute_basic_stats(self.dataflow_times)),
                ("event_handlers_stats", event_handlers_stats),
                (
                    "event_handlers_names",
                    {str(e.name).replace(".", "_") + "_names": v for e, v in self.event_handlers_names.items()},
                ),
            ]
        )

    def write_results(self, output_path: str) -> None:
        """
        Method to store the unaggregated profiling results to a csv file

        Args:
            output_path: file output path containing a filename

        .. code-block:: python

            profiler.write_results('path_to_dir/awesome_filename.csv')

        Examples:
            .. code-block:: text

                -----------------------------------------------------------------
                epoch iteration processing_stats dataflow_stats Event_STARTED ...
                1.0     1.0     0.00003         0.252387        0.125676
                1.0     2.0     0.00029         0.252342        0.125123
        """
        try:
            import pandas as pd
        except ImportError:
            raise RuntimeError("Need pandas to write results as files")

        iters_per_epoch = self.total_num_iters // self.max_epochs

        epochs = torch.arange(self.max_epochs, dtype=torch.float32).repeat_interleave(iters_per_epoch) + 1
        iterations = torch.arange(self.total_num_iters, dtype=torch.float32) + 1
        processing_stats = self.processing_times
        dataflow_stats = self.dataflow_times

        event_started = self.event_handlers_times[Events.STARTED].repeat_interleave(self.total_num_iters)
        event_completed = self.event_handlers_times[Events.COMPLETED].repeat_interleave(self.total_num_iters)
        event_epoch_started = self.event_handlers_times[Events.EPOCH_STARTED].repeat_interleave(iters_per_epoch)
        event_epoch_completed = self.event_handlers_times[Events.EPOCH_COMPLETED].repeat_interleave(iters_per_epoch)

        event_iter_started = self.event_handlers_times[Events.ITERATION_STARTED]
        event_iter_completed = self.event_handlers_times[Events.ITERATION_COMPLETED]
        event_batch_started = self.event_handlers_times[Events.GET_BATCH_STARTED]
        event_batch_completed = self.event_handlers_times[Events.GET_BATCH_COMPLETED]

        results_dump = torch.stack(
            [
                epochs,
                iterations,
                processing_stats,
                dataflow_stats,
                event_started,
                event_completed,
                event_epoch_started,
                event_epoch_completed,
                event_iter_started,
                event_iter_completed,
                event_batch_started,
                event_batch_completed,
            ],
            dim=1,
        ).numpy()

        results_df = pd.DataFrame(
            data=results_dump,
            columns=[
                "epoch",
                "iteration",
                "processing_stats",
                "dataflow_stats",
                "Event_STARTED",
                "Event_COMPLETED",
                "Event_EPOCH_STARTED",
                "Event_EPOCH_COMPLETED",
                "Event_ITERATION_STARTED",
                "Event_ITERATION_COMPLETED",
                "Event_GET_BATCH_STARTED",
                "Event_GET_BATCH_COMPLETED",
            ],
        )
        results_df.to_csv(output_path, index=False)

    @staticmethod
    def print_results(results: Dict) -> str:
        """
        Method to print the aggregated results from the profiler

        Args:
            results: the aggregated results from the profiler

        .. code-block:: python

            profiler.print_results(results)

        Examples:
            .. code-block:: text

                ----------------------------------------------------
                | Time profiling stats (in seconds):               |
                ----------------------------------------------------
                total  |  min/index  |  max/index  |  mean  |  std

                Processing function:
                157.46292 | 0.01452/1501 | 0.26905/0 | 0.07730 | 0.01258

                Dataflow:
                6.11384 | 0.00008/1935 | 0.28461/1551 | 0.00300 | 0.02693

                Event handlers:
                2.82721

                - Events.STARTED: []
                0.00000

                - Events.EPOCH_STARTED: []
                0.00006 | 0.00000/0 | 0.00000/17 | 0.00000 | 0.00000

                - Events.ITERATION_STARTED: ['PiecewiseLinear']
                0.03482 | 0.00001/188 | 0.00018/679 | 0.00002 | 0.00001

                - Events.ITERATION_COMPLETED: ['TerminateOnNan']
                0.20037 | 0.00006/866 | 0.00089/1943 | 0.00010 | 0.00003

                - Events.EPOCH_COMPLETED: ['empty_cuda_cache', 'training.<locals>.log_elapsed_time', ]
                2.57860 | 0.11529/0 | 0.14977/13 | 0.12893 | 0.00790

                - Events.COMPLETED: []
                not yet triggered
        """

        def to_str(v: Union[str, tuple]) -> str:
            if isinstance(v, str):
                return v
            elif isinstance(v, tuple):
                return f"{v[0]:.5f}/{v[1]}"
            return f"{v:.5f}"

        def odict_to_str(d: Mapping) -> str:
            out = " | ".join([to_str(v) for v in d.values()])
            return out

        others = {
            k: odict_to_str(v) if isinstance(v, OrderedDict) else v
            for k, v in results["event_handlers_stats"].items()
        }

        others.update(results["event_handlers_names"])

        output_message = """
----------------------------------------------------
| Time profiling stats (in seconds):               |
----------------------------------------------------
total  |  min/index  |  max/index  |  mean  |  std

Processing function:
{processing_stats}

Dataflow:
{dataflow_stats}

Event handlers:
{total_time:.5f}

- Events.STARTED: {STARTED_names}
{STARTED}

- Events.EPOCH_STARTED: {EPOCH_STARTED_names}
{EPOCH_STARTED}

- Events.ITERATION_STARTED: {ITERATION_STARTED_names}
{ITERATION_STARTED}

- Events.ITERATION_COMPLETED: {ITERATION_COMPLETED_names}
{ITERATION_COMPLETED}

- Events.EPOCH_COMPLETED: {EPOCH_COMPLETED_names}
{EPOCH_COMPLETED}

- Events.COMPLETED: {COMPLETED_names}
{COMPLETED}
""".format(
            processing_stats=odict_to_str(results["processing_stats"]),
            dataflow_stats=odict_to_str(results["dataflow_stats"]),
            **others,
        )
        print(output_message)
        return output_message
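# --- Usage sketch (illustrative, not part of the module above): a minimal
# end-to-end run of BasicTimeProfiler. `_demo_basic_profiler`, `train_step`
# and the synthetic `data` are hypothetical stand-ins for a real pipeline.
def _demo_basic_profiler() -> None:
    def train_step(engine: Engine, batch: torch.Tensor) -> float:
        # stand-in for a real training step
        return (batch * 2).sum().item()

    trainer = Engine(train_step)

    profiler = BasicTimeProfiler()
    profiler.attach(trainer)

    # `data` has a __len__, so the profiler can size its per-iteration tensors;
    # otherwise trainer.run(..., epoch_length=...) must be set explicitly
    data = [torch.rand(4) for _ in range(16)]
    trainer.run(data, max_epochs=2)

    profiler.print_results(profiler.get_results())
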
class HandlersTimeProfiler:
    """
    HandlersTimeProfiler can be used to profile the handlers,
    data loading and data processing times. Custom events are also
    profiled by this profiler

    Examples:
        .. code-block:: python

            from ignite.handlers import HandlersTimeProfiler

            trainer = Engine(train_updater)

            # Create an object of the profiler and attach an engine to it
            profiler = HandlersTimeProfiler()
            profiler.attach(trainer)

            @trainer.on(Events.EPOCH_COMPLETED)
            def log_intermediate_results():
                profiler.print_results(profiler.get_results())

            trainer.run(dataloader, max_epochs=3)

            profiler.write_results('path_to_dir/time_profiling.csv')

    .. versionadded:: 0.4.6
    """

    EVENT_FILTER_THESHOLD_TIME = 0.0001

    def __init__(self) -> None:
        self._dataflow_timer = Timer()
        self._processing_timer = Timer()
        self._event_handlers_timer = Timer()

        self.dataflow_times = []  # type: List[float]
        self.processing_times = []  # type: List[float]
        self.event_handlers_times = {}  # type: Dict[EventEnum, Dict[str, List[float]]]

    @staticmethod
    def _get_callable_name(handler: Callable) -> str:
        # get name of the callable handler
        return getattr(handler, "__qualname__", handler.__class__.__name__)

    def _create_wrapped_handler(self, handler: Callable, event: EventEnum) -> Callable:
        @functools.wraps(handler)
        def _timeit_handler(*args: Any, **kwargs: Any) -> None:
            self._event_handlers_timer.reset()
            handler(*args, **kwargs)
            t = self._event_handlers_timer.value()
            hname = self._get_callable_name(handler)
            # filter profiled time if the handler was attached to event with event filter
            if not hasattr(handler, "_parent") or t >= self.EVENT_FILTER_THESHOLD_TIME:
                self.event_handlers_times[event][hname].append(t)

        # required to revert back to original handler after profiling
        setattr(_timeit_handler, "_profiler_original", handler)
        return _timeit_handler

    def _timeit_processing(self) -> None:
        # handler used for profiling processing times
        t = self._processing_timer.value()
        self.processing_times.append(t)

    def _timeit_dataflow(self) -> None:
        # handler used for profiling dataflow times
        t = self._dataflow_timer.value()
        self.dataflow_times.append(t)

    def _reset(self, event_handlers_names: Mapping[EventEnum, List[str]]) -> None:
        # reset the variables used for profiling
        self.dataflow_times = []
        self.processing_times = []
        self.event_handlers_times = {e: {h: [] for h in event_handlers_names[e]} for e in event_handlers_names}

    @staticmethod
    def _is_internal_handler(handler: Callable) -> bool:
        # checks whether the handler is internal
        return any(n in repr(handler) for n in ["HandlersTimeProfiler.", "Timer."])

    def _detach_profiler_handlers(self, engine: Engine) -> None:
        # reverts handlers to original handlers
        for e in engine._event_handlers:
            for i, (func, args, kwargs) in enumerate(engine._event_handlers[e]):
                if hasattr(func, "_profiler_original"):
                    engine._event_handlers[e][i] = (func._profiler_original, args, kwargs)

    def _as_first_started(self, engine: Engine) -> None:
        # wraps original handlers for profiling
        self.event_handlers_names = {
            e: [
                self._get_callable_name(h)
                for (h, _, _) in engine._event_handlers[e]
                if not self._is_internal_handler(h)
            ]
            for e in engine._allowed_events
        }

        self._reset(self.event_handlers_names)

        for e in engine._allowed_events:
            for i, (func, args, kwargs) in enumerate(engine._event_handlers[e]):
                if not self._is_internal_handler(func):
                    engine._event_handlers[e][i] = (self._create_wrapped_handler(func, e), args, kwargs)

        # processing timer
        engine.add_event_handler(Events.ITERATION_STARTED, self._processing_timer.reset)
        engine._event_handlers[Events.ITERATION_COMPLETED].insert(0, (self._timeit_processing, (), {}))

        # dataflow timer
        engine.add_event_handler(Events.GET_BATCH_STARTED, self._dataflow_timer.reset)
        engine._event_handlers[Events.GET_BATCH_COMPLETED].insert(0, (self._timeit_dataflow, (), {}))

        # revert back the wrapped handlers with original handlers at the end
        engine.add_event_handler(Events.COMPLETED, self._detach_profiler_handlers)

    def attach(self, engine: Engine) -> None:
        """Attach HandlersTimeProfiler to the given engine.

        Args:
            engine: the instance of Engine to attach
        """
        if not isinstance(engine, Engine):
            raise TypeError(f"Argument engine should be ignite.engine.Engine, but given {type(engine)}")

        if not engine.has_event_handler(self._as_first_started):
            engine._event_handlers[Events.STARTED].insert(0, (self._as_first_started, (engine,), {}))

    def get_results(self) -> List[List[Union[str, float]]]:
        """
        Method to fetch the aggregated profiler results after the engine is run

        .. code-block:: python

            results = profiler.get_results()
        """
        total_eh_time = sum(
            [
                sum(self.event_handlers_times[e][h])
                for e in self.event_handlers_times
                for h in self.event_handlers_times[e]
            ]
        )
        total_eh_time = round(float(total_eh_time), 5)

        def compute_basic_stats(
            times: Union[Sequence, torch.Tensor]
        ) -> List[Union[str, float, Tuple[Union[str, float], Union[str, float]]]]:
            data = torch.as_tensor(times, dtype=torch.float32)
            # compute on non-zero data:
            data = data[data > 0]
            total = round(torch.sum(data).item(), 5) if len(data) > 0 else "not triggered"  # type: Union[str, float]
            min_index = ("None", "None")  # type: Tuple[Union[str, float], Union[str, float]]
            max_index = ("None", "None")  # type: Tuple[Union[str, float], Union[str, float]]
            mean = "None"  # type: Union[str, float]
            std = "None"  # type: Union[str, float]
            if len(data) > 0:
                min_index = (round(torch.min(data).item(), 5), torch.argmin(data).item())
                max_index = (round(torch.max(data).item(), 5), torch.argmax(data).item())
                mean = round(torch.mean(data).item(), 5)
                if len(data) > 1:
                    std = round(torch.std(data).item(), 5)
            return [total, min_index, max_index, mean, std]

        event_handler_stats = [
            [
                h,
                getattr(e, "name", str(e)),
                *compute_basic_stats(torch.tensor(self.event_handlers_times[e][h], dtype=torch.float32)),
            ]
            for e in self.event_handlers_times
            for h in self.event_handlers_times[e]
        ]
        event_handler_stats.append(["Total", "", total_eh_time, "", "", "", ""])
        event_handler_stats.append(["Processing", "None", *compute_basic_stats(self.processing_times)])
        event_handler_stats.append(["Dataflow", "None", *compute_basic_stats(self.dataflow_times)])

        return event_handler_stats

    def write_results(self, output_path: str) -> None:
        """
        Method to store the unaggregated profiling results to a csv file

        Args:
            output_path: file output path containing a filename

        .. code-block:: python

            profiler.write_results('path_to_dir/awesome_filename.csv')

        Examples:
            .. code-block:: text

                -----------------------------------------------------------------
                # processing_stats dataflow_stats training.<locals>.log_elapsed_time (EPOCH_COMPLETED) ...
                1     0.00003         0.252387        0.125676
                2     0.00029         0.252342        0.125123
        """
        try:
            import pandas as pd
        except ImportError:
            raise RuntimeError("Need pandas to write results as files")

        processing_stats = torch.tensor(self.processing_times, dtype=torch.float32)
        dataflow_stats = torch.tensor(self.dataflow_times, dtype=torch.float32)

        cols = [processing_stats, dataflow_stats]
        headers = ["processing_stats", "dataflow_stats"]
        for e in self.event_handlers_times:
            for h in self.event_handlers_times[e]:
                headers.append(f"{h} ({getattr(e, 'name', str(e))})")
                cols.append(torch.tensor(self.event_handlers_times[e][h], dtype=torch.float32))

        # Determine maximum length
        max_len = max([x.numel() for x in cols])

        count_col = torch.arange(max_len, dtype=torch.float32) + 1
        cols.insert(0, count_col)
        headers.insert(0, "#")

        # pad all tensors to have same length
        cols = [torch.nn.functional.pad(x, pad=(0, max_len - x.numel()), mode="constant", value=0) for x in cols]

        results_dump = torch.stack(cols, dim=1).numpy()

        results_df = pd.DataFrame(data=results_dump, columns=headers)
        results_df.to_csv(output_path, index=False)

    @staticmethod
    def print_results(results: List[List[Union[str, float]]]) -> None:
        """
        Method to print the aggregated results from the profiler

        Args:
            results: the aggregated results from the profiler

        .. code-block:: python

            profiler.print_results(results)

        Examples:
            .. code-block:: text

                -----------------------------------------  -----------------------  --------------
                Handler                                     Event Name                     Total(s)
                -----------------------------------------  -----------------------  --------------
                run.<locals>.log_training_results           EPOCH_COMPLETED                19.43245
                run.<locals>.log_validation_results         EPOCH_COMPLETED                 2.55271
                run.<locals>.log_time                       EPOCH_COMPLETED                 0.00049
                run.<locals>.log_intermediate_results       EPOCH_COMPLETED                 0.00106
                run.<locals>.log_training_loss              ITERATION_COMPLETED               0.059
                run.<locals>.log_time                       COMPLETED                 not triggered
                -----------------------------------------  -----------------------  --------------
                Total                                                                      22.04571
                -----------------------------------------  -----------------------  --------------
                Processing took total 11.29543s [min/index: 0.00393s/1875, max/index: 0.00784s/0,
                 mean: 0.00602s, std: 0.00034s]
                Dataflow took total 16.24365s [min/index: 0.00533s/1874, max/index: 0.01129s/937,
                 mean: 0.00866s, std: 0.00113s]
        """
        # adopted implementation of torch.autograd.profiler.build_table
        handler_column_width = max([len(item[0]) for item in results]) + 4  # type: ignore[arg-type]
        event_column_width = max([len(item[1]) for item in results]) + 4  # type: ignore[arg-type]

        DEFAULT_COLUMN_WIDTH = 14

        headers = [
            "Handler",
            "Event Name",
            "Total(s)",
            "Min(s)/IDX",
            "Max(s)/IDX",
            "Mean(s)",
            "Std(s)",
        ]

        # Have to use a list because nonlocal is Py3 only...
        SPACING_SIZE = 2
        row_format_lst = [""]
        header_sep_lst = [""]
        line_length_lst = [-SPACING_SIZE]

        def add_column(padding: int, text_dir: str = ">") -> None:
            row_format_lst[0] += "{: " + text_dir + str(padding) + "}" + (" " * SPACING_SIZE)
            header_sep_lst[0] += "-" * padding + (" " * SPACING_SIZE)
            line_length_lst[0] += padding + SPACING_SIZE

        add_column(handler_column_width, text_dir="<")
        add_column(event_column_width, text_dir="<")
        for _ in headers[2:]:
            add_column(DEFAULT_COLUMN_WIDTH)

        row_format = row_format_lst[0]
        header_sep = header_sep_lst[0]

        result = []

        def append(s: str) -> None:
            result.append(s)
            result.append("\n")

        result.append("\n")
        append(header_sep)
        append(row_format.format(*headers))
        append(header_sep)
        for row in results[:-3]:
            # format min/idx and max/idx
            row[3] = "{}/{}".format(*row[3])  # type: ignore[misc]
            row[4] = "{}/{}".format(*row[4])  # type: ignore[misc]
            append(row_format.format(*row))
        append(header_sep)
        # print total handlers time row
        append(row_format.format(*results[-3]))
        append(header_sep)

        summary_format = "{} took total {}s [min/index: {}, max/index: {}, mean: {}s, std: {}s]"
        for row in results[-2:]:
            row[3] = "{}s/{}".format(*row[3])  # type: ignore[misc]
            row[4] = "{}s/{}".format(*row[4])  # type: ignore[misc]
            del row[1]
            append(summary_format.format(*row))
        print("".join(result))
code-block:: text ----------------------------------------------------------------- # processing_stats", "e not in self.events_to_ignore] ) # type: Union[int, torch.Tensor] event_handlers_stats", "def _timeit_dataflow(self) -> None: # handler used for profiling dataflow", "0 else \"not triggered\" # type: Union[str, float] min_index =", "to fix this.\" ) num_iters_per_epoch = engine.state.epoch_length self.max_epochs = cast(int,", "max_index, mean, std] event_handler_stats = [ [ h, getattr(e, \"name\",", "str = \">\") -> None: row_format_lst[0] += \"{: \" +", "to write results as files\") iters_per_epoch = self.total_num_iters // self.max_epochs", "def __init__(self) -> None: self._dataflow_timer = Timer() self._processing_timer = Timer()", "hname = self._get_callable_name(handler) # filter profiled time if the handler", "timer engine.add_event_handler(Events.ITERATION_STARTED, self._processing_timer.reset) engine._event_handlers[Events.ITERATION_COMPLETED].insert(0, (self._timeit_processing, (), {})) # dataflow timer", "std Processing function: 157.46292 | 0.01452/1501 | 0.26905/0 | 0.07730", "this profiler Examples: .. code-block:: python from ignite.handlers import HandlersTimeProfiler", "data loading and data processing times. Examples: .. code-block:: python", "engine.add_event_handler(Events.ITERATION_STARTED, self._processing_timer.reset) engine._event_handlers[Events.ITERATION_COMPLETED].insert(0, (self._timeit_processing, (), {})) # dataflow timer engine.add_event_handler(Events.GET_BATCH_STARTED,", "pd.DataFrame( data=results_dump, columns=[ \"epoch\", \"iteration\", \"processing_stats\", \"dataflow_stats\", \"Event_STARTED\", \"Event_COMPLETED\", \"Event_EPOCH_STARTED\",", "self._lmethods = [ self._as_last_epoch_started, self._as_last_epoch_completed, self._as_last_iter_started, self._as_last_iter_completed, self._as_last_get_batch_started, self._as_last_get_batch_completed, self._as_last_completed,", "raise TypeError(f\"Argument engine should be ignite.engine.Engine, but given {type(engine)}\") if", "torch.zeros(1) self.processing_times = torch.zeros(1) self.event_handlers_times = {} # type: Dict[EventEnum,", "same length cols = [torch.nn.functional.pad(x, pad=(0, max_len - x.numel()), mode=\"constant\",", "event_iter_completed, event_batch_started, event_batch_completed, ], dim=1, ).numpy() results_df = pd.DataFrame( data=results_dump,", "0 else \"not yet triggered\") ] # type: List[Tuple[str, Union[str,", "= \">\") -> None: row_format_lst[0] += \"{: \" + text_dir", "return output_message class HandlersTimeProfiler: \"\"\" HandlersTimeProfiler can be used to", "= self.event_handlers_times[Events.EPOCH_STARTED].repeat_interleave(iters_per_epoch) event_epoch_completed = self.event_handlers_times[Events.EPOCH_COMPLETED].repeat_interleave(iters_per_epoch) event_iter_started = self.event_handlers_times[Events.ITERATION_STARTED] event_iter_completed =", "internal return any(n in repr(handler) for n in [\"HandlersTimeProfiler.\", \"Timer.\"])", "-> None: t = self._event_handlers_timer.value() e = engine.state.epoch - 1", "torch.zeros(1), Events.EPOCH_STARTED: torch.zeros(num_epochs), Events.EPOCH_COMPLETED: torch.zeros(num_epochs), Events.ITERATION_STARTED: torch.zeros(total_num_iters), Events.ITERATION_COMPLETED: torch.zeros(total_num_iters), Events.GET_BATCH_COMPLETED:", "path containing a filename .. 
code-block:: python profiler.write_results('path_to_dir/awesome_filename.csv') Examples: ..", "None: self.event_handlers_times[Events.COMPLETED][0] = self._event_handlers_timer.value() # Remove added handlers: engine.remove_event_handler(self._as_last_started, Events.STARTED)", "event_started = self.event_handlers_times[Events.STARTED].repeat_interleave(self.total_num_iters) event_completed = self.event_handlers_times[Events.COMPLETED].repeat_interleave(self.total_num_iters) event_epoch_started = self.event_handlers_times[Events.EPOCH_STARTED].repeat_interleave(iters_per_epoch) event_epoch_completed", "Events.COMPLETED, ] self._fmethods = [ self._as_first_epoch_started, self._as_first_epoch_completed, self._as_first_iter_started, self._as_first_iter_completed, self._as_first_get_batch_started,", "Union[str, float] min_index = (\"None\", \"None\") # type: Tuple[Union[str, float],", "0.125123 \"\"\" try: import pandas as pd except ImportError: raise", ") num_iters_per_epoch = engine.state.epoch_length self.max_epochs = cast(int, engine.state.max_epochs) self.total_num_iters =", "code-block:: python profiler.print_results(results) Examples: .. code-block:: text ----------------------------------------- ----------------------- --------------", "t = self._event_handlers_timer.value() i = engine.state.iteration - 1 self.event_handlers_times[Events.ITERATION_STARTED][i] =", "Examples: .. code-block:: python from ignite.handlers import HandlersTimeProfiler trainer =", "Timer() self.dataflow_times = [] # type: List[float] self.processing_times = []", "in repr(handler) for n in [\"HandlersTimeProfiler.\", \"Timer.\"]) def _detach_profiler_handlers(self, engine:", "internal handlers into output ] for e in Events if", "event_handlers_names} @staticmethod def _is_internal_handler(handler: Callable) -> bool: # checks whether", "padding + SPACING_SIZE add_column(handler_column_width, text_dir=\"<\") add_column(event_column_width, text_dir=\"<\") for _ in", "# type: ignore[list-item] ) return OrderedDict( [ (\"processing_stats\", self._compute_basic_stats(self.processing_times)), (\"dataflow_stats\",", "have same length cols = [torch.nn.functional.pad(x, pad=(0, max_len - x.numel()),", "for k, v in results[\"event_handlers_stats\"].items() } others.update(results[\"event_handlers_names\"]) output_message = \"\"\"", "engine.remove_event_handler(m, e) def attach(self, engine: Engine) -> None: \"\"\"Attach BasicTimeProfiler", "used to profile the handlers, events, data loading and data", "-> None: self._event_handlers_timer.reset() def _as_last_get_batch_completed(self, engine: Engine) -> None: t", "for i, (func, args, kwargs) in enumerate(engine._event_handlers[e]): if hasattr(func, \"_profiler_original\"):", "x in cols] results_dump = torch.stack(cols, dim=1).numpy() results_df = pd.DataFrame(data=results_dump,", "float]] max_index = (\"None\", \"None\") # type: Tuple[Union[str, float], Union[str,", "= Timer() self.dataflow_times = [] # type: List[float] self.processing_times =", "Handler Event Name Total(s) ----------------------------------------- ----------------------- -------------- run.<locals>.log_training_results EPOCH_COMPLETED 19.43245", "given engine. 
Args: engine: the instance of Engine to attach", "the handler is internal return any(n in repr(handler) for n", "append(row_format.format(*headers)) append(header_sep) for row in results[:-3]: # format min/idx and", "engine._event_handlers[e][i] = (self._create_wrapped_handler(func, e), args, kwargs) # processing timer engine.add_event_handler(Events.ITERATION_STARTED,", "List[float]]] @staticmethod def _get_callable_name(handler: Callable) -> str: # get name", "float] if len(data) > 0: min_index = (round(torch.min(data).item(), 5), torch.argmin(data).item())", "with event filter if not hasattr(handler, \"_parent\") or t >=", "aggregated results from the profiler .. code-block:: python profiler.print_results(results) Examples:", "in self.event_handlers_times[e] ] ) total_eh_time = round(float(total_eh_time), 5) def compute_basic_stats(", "class BasicTimeProfiler: \"\"\" BasicTimeProfiler can be used to profile the", "self._lmethods): engine.remove_event_handler(m, e) def attach(self, engine: Engine) -> None: \"\"\"Attach", "and max/idx row[3] = \"{}/{}\".format(*row[3]) # type: ignore[misc] row[4] =", "self._as_first_get_batch_completed, self._as_first_completed, ] self._lmethods = [ self._as_last_epoch_started, self._as_last_epoch_completed, self._as_last_iter_started, self._as_last_iter_completed,", "ImportError: raise RuntimeError(\"Need pandas to write results as files\") iters_per_epoch", "= self.event_handlers_times[Events.STARTED].repeat_interleave(self.total_num_iters) event_completed = self.event_handlers_times[Events.COMPLETED].repeat_interleave(self.total_num_iters) event_epoch_started = self.event_handlers_times[Events.EPOCH_STARTED].repeat_interleave(iters_per_epoch) event_epoch_completed =", "\"\"\" try: import pandas as pd except ImportError: raise RuntimeError(\"Need", "ignore[misc] append(row_format.format(*row)) append(header_sep) # print total handlers time row append(row_format.format(*results[-3]))", "total_eh_time = sum( [(self.event_handlers_times[e]).sum() for e in Events if e", "BasicTimeProfiler trainer = Engine(train_updater) # Create an object of the", "self.events_to_ignore] ) # type: Union[int, torch.Tensor] event_handlers_stats = dict( [", "handlers with original handlers at the end engine.add_event_handler(Events.COMPLETED, self._detach_profiler_handlers) def", "{str(e.name).replace(\".\", \"_\") + \"_names\": v for e, v in self.event_handlers_names.items()},", "in self.event_handlers_names.items()}, ), ] ) def write_results(self, output_path: str) ->", "\"\"\" if not isinstance(engine, Engine): raise TypeError(f\"Argument engine should be", "= torch.zeros(1) self.event_handlers_times = {} # type: Dict[EventEnum, torch.Tensor] self._events", "Events.DATALOADER_STOP_ITERATION, ] def __init__(self) -> None: self._dataflow_timer = Timer() self._processing_timer", "+ 1 processing_stats = self.processing_times dataflow_stats = self.dataflow_times event_started =", "torch.zeros(1), Events.COMPLETED: torch.zeros(1), Events.EPOCH_STARTED: torch.zeros(num_epochs), Events.EPOCH_COMPLETED: torch.zeros(num_epochs), Events.ITERATION_STARTED: torch.zeros(total_num_iters), Events.ITERATION_COMPLETED:", "\"not yet triggered\") ] # type: List[Tuple[str, Union[str, float, Tuple[Union[float],", "# dataflow timer engine.add_event_handler(Events.GET_BATCH_STARTED, self._dataflow_timer.reset) engine._event_handlers[Events.GET_BATCH_COMPLETED].insert(0, (self._timeit_dataflow, (), {})) #", "} self._reset(self.event_handlers_names) for e in engine._allowed_events: for i, (func, 
args,", "print_results(results: Dict) -> str: \"\"\" Method to print the aggregated", "# handler used for profiling processing times t = self._processing_timer.value()", "\"Max(s)/IDX\", \"Mean(s)\", \"Std(s)\", ] # Have to use a list", "profiling stats (in seconds): | ---------------------------------------------------- total | min/index |", "0.28461/1551 | 0.00300 | 0.02693 Event handlers: 2.82721 - Events.STARTED:", "Events.ITERATION_COMPLETED: ['TerminateOnNan'] 0.20037 | 0.00006/866 | 0.00089/1943 | 0.00010 |", "] def __init__(self) -> None: self._dataflow_timer = Timer() self._processing_timer =", "self.event_handlers_names.items()}, ), ] ) def write_results(self, output_path: str) -> None:", "], dim=1, ).numpy() results_df = pd.DataFrame( data=results_dump, columns=[ \"epoch\", \"iteration\",", "t = self._processing_timer.value() i = engine.state.iteration - 1 self.processing_times[i] =", "cols.insert(0, count_col) headers.insert(0, \"#\") # pad all tensors to have", "\"Event_COMPLETED\", \"Event_EPOCH_STARTED\", \"Event_EPOCH_COMPLETED\", \"Event_ITERATION_STARTED\", \"Event_ITERATION_COMPLETED\", \"Event_GET_BATCH_STARTED\", \"Event_GET_BATCH_COMPLETED\", ], ) results_df.to_csv(output_path,", "setattr(_timeit_handler, \"_profiler_original\", handler) return _timeit_handler def _timeit_processing(self) -> None: #", "0.00049 run.<locals>.log_intermediate_results EPOCH_COMPLETED 0.00106 run.<locals>.log_training_loss ITERATION_COMPLETED 0.059 run.<locals>.log_time COMPLETED not", "handlers: engine._event_handlers[Events.STARTED].append((self._as_last_started, (engine,), {})) for e, m in zip(self._events, self._fmethods):", "header_sep_lst[0] result = [] def append(s: str) -> None: result.append(s)", "{ Events.STARTED: torch.zeros(1), Events.COMPLETED: torch.zeros(1), Events.EPOCH_STARTED: torch.zeros(num_epochs), Events.EPOCH_COMPLETED: torch.zeros(num_epochs), Events.ITERATION_STARTED:", "- 1 self.event_handlers_times[Events.GET_BATCH_STARTED][i] = t def _as_first_get_batch_completed(self, engine: Engine) ->", "\".join([to_str(v) for v in d.values()]) return out others = {", "took total 16.24365s [min/index: 0.00533s/1874, max/index: 0.01129s/937, mean: 0.00866s, std:", "\"__qualname__\") else h.__class__.__name__ for (h, _, _) in engine._event_handlers[e] if", "cols = [torch.nn.functional.pad(x, pad=(0, max_len - x.numel()), mode=\"constant\", value=0) for", "results as files\") iters_per_epoch = self.total_num_iters // self.max_epochs epochs =", "str(e)), *compute_basic_stats(torch.tensor(self.event_handlers_times[e][h], dtype=torch.float32)), ] for e in self.event_handlers_times for h", "Engine) -> None: # reverts handlers to original handlers for", "Union[float]]]]: # compute on non-zero data: data = data[data >", ".. code-block:: python profiler.print_results(results) Examples: .. 
code-block:: text ----------------------------------------- -----------------------", "COMPLETED not triggered ----------------------------------------- ----------------------- -------------- Total 22.04571 ----------------------------------------- -----------------------", "Union[Sequence, torch.Tensor] ) -> List[Union[str, float, Tuple[Union[str, float], Union[str, float]]]]:", "Processing took total 11.29543s [min/index: 0.00393s/1875, max/index: 0.00784s/0, mean: 0.00602s,", "handler used for profiling processing times t = self._processing_timer.value() self.processing_times.append(t)", "} def _as_first_started(self, engine: Engine) -> None: if hasattr(engine.state.dataloader, \"__len__\"):", "if isinstance(v, OrderedDict) else v for k, v in results[\"event_handlers_stats\"].items()", "the given engine. Args: engine: the instance of Engine to", "profiler.attach(trainer) @trainer.on(Events.EPOCH_COMPLETED) def log_intermediate_results(): profiler.print_results(profiler.get_results()) trainer.run(dataloader, max_epochs=3) profiler.write_results('path_to_dir/time_profiling.csv') .. versionadded::", "function: {processing_stats} Dataflow: {dataflow_stats} Event handlers: {total_time:.5f} - Events.STARTED: {STARTED_names}", "pd.DataFrame(data=results_dump, columns=headers) results_df.to_csv(output_path, index=False) @staticmethod def print_results(results: List[List[Union[str, float]]]) ->", "row_format_lst[0] += \"{: \" + text_dir + str(padding) + \"}\"", "str) -> None: \"\"\" Method to store the unaggregated profiling", "or t >= self.EVENT_FILTER_THESHOLD_TIME: self.event_handlers_times[event][hname].append(t) # required to revert back", "the end engine.add_event_handler(Events.COMPLETED, self._detach_profiler_handlers) def attach(self, engine: Engine) -> None:", "processing times t = self._processing_timer.value() self.processing_times.append(t) def _timeit_dataflow(self) -> None:", "ignore[arg-type] DEFAULT_COLUMN_WIDTH = 14 headers = [ \"Handler\", \"Event Name\",", "torch.as_tensor(times, dtype=torch.float32) # compute on non-zero data: data = data[data", "Events.ITERATION_STARTED: torch.zeros(total_num_iters), Events.ITERATION_COMPLETED: torch.zeros(total_num_iters), Events.GET_BATCH_COMPLETED: torch.zeros(total_num_iters), Events.GET_BATCH_STARTED: torch.zeros(total_num_iters), } def", "0.12893 | 0.00790 - Events.COMPLETED: [] not yet triggered \"\"\"", "= pd.DataFrame(data=results_dump, columns=headers) results_df.to_csv(output_path, index=False) @staticmethod def print_results(results: List[List[Union[str, float]]])", "python from ignite.handlers import BasicTimeProfiler trainer = Engine(train_updater) # Create", "len(data) > 1: std = round(torch.std(data).item(), 5) return [total, min_index,", "import Engine, EventEnum, Events from ignite.handlers.timing import Timer class BasicTimeProfiler:", "['PiecewiseLinear'] 0.03482 | 0.00001/188 | 0.00018/679 | 0.00002 | 0.00001", "to it profiler = HandlersTimeProfiler() profiler.attach(trainer) @trainer.on(Events.EPOCH_COMPLETED) def log_intermediate_results(): profiler.print_results(profiler.get_results())", "-> None: # reverts handlers to original handlers for e", "None: t = self._event_handlers_timer.value() i = engine.state.iteration - 1 self.event_handlers_times[Events.ITERATION_STARTED][i]", "for e in self.event_handlers_times for h in self.event_handlers_times[e] ] event_handler_stats.append([\"Total\",", "[ Events.EXCEPTION_RAISED, Events.TERMINATE, Events.TERMINATE_SINGLE_EPOCH, Events.DATALOADER_STOP_ITERATION, ] def 
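
# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the module above: a minimal end-to-end
# use of BasicTimeProfiler on a toy engine. The update function `train_step`,
# the helper name `_demo_basic_time_profiler` and the synthetic `data` list
# are assumptions made for this example only.
def _demo_basic_time_profiler() -> None:
    def train_step(engine: Engine, batch: int) -> int:
        # stand-in for a real processing function
        return batch

    trainer = Engine(train_step)

    profiler = BasicTimeProfiler()
    profiler.attach(trainer)

    # any iterable with __len__ works; epoch_length is then inferred from it
    data = list(range(8))
    trainer.run(data, max_epochs=2)

    # aggregated processing/dataflow/per-event handler stats, printed as text
    profiler.print_results(profiler.get_results())
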
class HandlersTimeProfiler:
    """
    HandlersTimeProfiler can be used to profile the handlers,
    data loading and data processing times. Custom events are also
    profiled by this profiler

    Examples:
        .. code-block:: python

            from ignite.handlers import HandlersTimeProfiler

            trainer = Engine(train_updater)

            # Create an object of the profiler and attach an engine to it
            profiler = HandlersTimeProfiler()
            profiler.attach(trainer)

            @trainer.on(Events.EPOCH_COMPLETED)
            def log_intermediate_results():
                profiler.print_results(profiler.get_results())

            trainer.run(dataloader, max_epochs=3)

            profiler.write_results('path_to_dir/time_profiling.csv')

    .. versionadded:: 0.4.6
    """

    EVENT_FILTER_THESHOLD_TIME = 0.0001

    def __init__(self) -> None:
        self._dataflow_timer = Timer()
        self._processing_timer = Timer()
        self._event_handlers_timer = Timer()

        self.dataflow_times = []  # type: List[float]
        self.processing_times = []  # type: List[float]
        self.event_handlers_times = {}  # type: Dict[EventEnum, Dict[str, List[float]]]

    @staticmethod
    def _get_callable_name(handler: Callable) -> str:
        # get name of the callable handler
        return getattr(handler, "__qualname__", handler.__class__.__name__)

    def _create_wrapped_handler(self, handler: Callable, event: EventEnum) -> Callable:
        @functools.wraps(handler)
        def _timeit_handler(*args: Any, **kwargs: Any) -> None:
            self._event_handlers_timer.reset()
            handler(*args, **kwargs)
            t = self._event_handlers_timer.value()
            hname = self._get_callable_name(handler)
            # filter profiled time if the handler was attached to event with event filter
            if not hasattr(handler, "_parent") or t >= self.EVENT_FILTER_THESHOLD_TIME:
                self.event_handlers_times[event][hname].append(t)

        # required to revert back to original handler after profiling
        setattr(_timeit_handler, "_profiler_original", handler)
        return _timeit_handler

    def _timeit_processing(self) -> None:
        # handler used for profiling processing times
        t = self._processing_timer.value()
        self.processing_times.append(t)

    def _timeit_dataflow(self) -> None:
        # handler used for profiling dataflow times
        t = self._dataflow_timer.value()
        self.dataflow_times.append(t)

    def _reset(self, event_handlers_names: Mapping[EventEnum, List[str]]) -> None:
        # reset the variables used for profiling
        self.dataflow_times = []
        self.processing_times = []
        self.event_handlers_times = {e: {h: [] for h in event_handlers_names[e]} for e in event_handlers_names}

    @staticmethod
    def _is_internal_handler(handler: Callable) -> bool:
        # checks whether the handler is internal
        return any(n in repr(handler) for n in ["HandlersTimeProfiler.", "Timer."])

    def _detach_profiler_handlers(self, engine: Engine) -> None:
        # reverts handlers to original handlers
        for e in engine._event_handlers:
            for i, (func, args, kwargs) in enumerate(engine._event_handlers[e]):
                if hasattr(func, "_profiler_original"):
                    engine._event_handlers[e][i] = (func._profiler_original, args, kwargs)

    def _as_first_started(self, engine: Engine) -> None:
        # wraps original handlers for profiling
        self.event_handlers_names = {
            e: [
                self._get_callable_name(h)
                for (h, _, _) in engine._event_handlers[e]
                if not self._is_internal_handler(h)
            ]
            for e in engine._allowed_events
        }

        self._reset(self.event_handlers_names)

        for e in engine._allowed_events:
            for i, (func, args, kwargs) in enumerate(engine._event_handlers[e]):
                if not self._is_internal_handler(func):
                    engine._event_handlers[e][i] = (self._create_wrapped_handler(func, e), args, kwargs)

        # processing timer
        engine.add_event_handler(Events.ITERATION_STARTED, self._processing_timer.reset)
        engine._event_handlers[Events.ITERATION_COMPLETED].insert(0, (self._timeit_processing, (), {}))

        # dataflow timer
        engine.add_event_handler(Events.GET_BATCH_STARTED, self._dataflow_timer.reset)
        engine._event_handlers[Events.GET_BATCH_COMPLETED].insert(0, (self._timeit_dataflow, (), {}))

        # revert back the wrapped handlers with original handlers at the end
        engine.add_event_handler(Events.COMPLETED, self._detach_profiler_handlers)

    def attach(self, engine: Engine) -> None:
        """Attach HandlersTimeProfiler to the given engine.

        Args:
            engine: the instance of Engine to attach
        """
        if not isinstance(engine, Engine):
            raise TypeError(f"Argument engine should be ignite.engine.Engine, but given {type(engine)}")
        if not engine.has_event_handler(self._as_first_started):
            engine._event_handlers[Events.STARTED].insert(0, (self._as_first_started, (engine,), {}))

    def get_results(self) -> List[List[Union[str, float]]]:
        """
        Method to fetch the aggregated profiler results after the engine is run

        .. code-block:: python

            results = profiler.get_results()

        """
        total_eh_time = sum(
            [
                sum(self.event_handlers_times[e][h])
                for e in self.event_handlers_times
                for h in self.event_handlers_times[e]
            ]
        )
        total_eh_time = round(float(total_eh_time), 5)

        def compute_basic_stats(
            times: Union[Sequence, torch.Tensor]
        ) -> List[Union[str, float, Tuple[Union[str, float], Union[str, float]]]]:
            data = torch.as_tensor(times, dtype=torch.float32)
            # compute on non-zero data:
            data = data[data > 0]
            total = round(torch.sum(data).item(), 5) if len(data) > 0 else "not triggered"  # type: Union[str, float]
            min_index = ("None", "None")  # type: Tuple[Union[str, float], Union[str, float]]
            max_index = ("None", "None")  # type: Tuple[Union[str, float], Union[str, float]]
            mean = "None"  # type: Union[str, float]
            std = "None"  # type: Union[str, float]
            if len(data) > 0:
                min_index = (round(torch.min(data).item(), 5), torch.argmin(data).item())
                max_index = (round(torch.max(data).item(), 5), torch.argmax(data).item())
                mean = round(torch.mean(data).item(), 5)
                if len(data) > 1:
                    std = round(torch.std(data).item(), 5)
            return [total, min_index, max_index, mean, std]

        event_handler_stats = [
            [
                h,
                getattr(e, "name", str(e)),
                *compute_basic_stats(torch.tensor(self.event_handlers_times[e][h], dtype=torch.float32)),
            ]
            for e in self.event_handlers_times
            for h in self.event_handlers_times[e]
        ]
        event_handler_stats.append(["Total", "", total_eh_time, "", "", "", ""])
        event_handler_stats.append(["Processing", "None", *compute_basic_stats(self.processing_times)])
        event_handler_stats.append(["Dataflow", "None", *compute_basic_stats(self.dataflow_times)])

        return event_handler_stats

    def write_results(self, output_path: str) -> None:
        """
        Method to store the unaggregated profiling results to a csv file

        Args:
            output_path: file output path containing a filename

        .. code-block:: python

            profiler.write_results('path_to_dir/awesome_filename.csv')

        Examples:
            .. code-block:: text

                -----------------------------------------------------------------
                # processing_stats dataflow_stats training.<locals>.log_elapsed_time (EPOCH_COMPLETED) ...
                1     0.00003      0.252387       0.125676
                2     0.00029      0.252342       0.125123
        """
        try:
            import pandas as pd
        except ImportError:
            raise RuntimeError("Need pandas to write results as files")

        processing_stats = torch.tensor(self.processing_times, dtype=torch.float32)
        dataflow_stats = torch.tensor(self.dataflow_times, dtype=torch.float32)

        cols = [processing_stats, dataflow_stats]
        headers = ["processing_stats", "dataflow_stats"]
        for e in self.event_handlers_times:
            for h in self.event_handlers_times[e]:
                headers.append(f"{h} ({getattr(e, 'name', str(e))})")
                cols.append(torch.tensor(self.event_handlers_times[e][h], dtype=torch.float32))
        # Determine maximum length
        max_len = max([x.numel() for x in cols])

        count_col = torch.arange(max_len, dtype=torch.float32) + 1
        cols.insert(0, count_col)
        headers.insert(0, "#")

        # pad all tensors to have same length
        cols = [torch.nn.functional.pad(x, pad=(0, max_len - x.numel()), mode="constant", value=0) for x in cols]

        results_dump = torch.stack(cols, dim=1).numpy()

        results_df = pd.DataFrame(data=results_dump, columns=headers)
        results_df.to_csv(output_path, index=False)

    @staticmethod
    def print_results(results: List[List[Union[str, float]]]) -> None:
        """
        Method to print the aggregated results from the profiler

        Args:
            results: the aggregated results from the profiler

        .. code-block:: python

            profiler.print_results(results)

        Examples:
            .. code-block:: text

                -----------------------------------------  -----------------------  --------------  ...
                Handler                                    Event Name                     Total(s)
                -----------------------------------------  -----------------------  --------------
                run.<locals>.log_training_results          EPOCH_COMPLETED                19.43245
                run.<locals>.log_validation_results        EPOCH_COMPLETED                 2.55271
                run.<locals>.log_time                      EPOCH_COMPLETED                 0.00049
                run.<locals>.log_intermediate_results      EPOCH_COMPLETED                 0.00106
                run.<locals>.log_training_loss             ITERATION_COMPLETED               0.059
                run.<locals>.log_time                      COMPLETED                 not triggered
                -----------------------------------------  -----------------------  --------------
                Total                                                                     22.04571
                -----------------------------------------  -----------------------  --------------
                Processing took total 11.29543s [min/index: 0.00393s/1875, max/index: 0.00784s/0,
                 mean: 0.00602s, std: 0.00034s]
                Dataflow took total 16.24365s [min/index: 0.00533s/1874, max/index: 0.01129s/937,
                 mean: 0.00866s, std: 0.00113s]

        """
        # adopted implementation of torch.autograd.profiler.build_table
        handler_column_width = max([len(item[0]) for item in results]) + 4  # type: ignore[arg-type]
        event_column_width = max([len(item[1]) for item in results]) + 4  # type: ignore[arg-type]

        DEFAULT_COLUMN_WIDTH = 14

        headers = [
            "Handler",
            "Event Name",
            "Total(s)",
            "Min(s)/IDX",
            "Max(s)/IDX",
            "Mean(s)",
            "Std(s)",
        ]

        # Have to use a list because nonlocal is Py3 only...
        SPACING_SIZE = 2
        row_format_lst = [""]
        header_sep_lst = [""]
        line_length_lst = [-SPACING_SIZE]

        def add_column(padding: int, text_dir: str = ">") -> None:
            row_format_lst[0] += "{: " + text_dir + str(padding) + "}" + (" " * SPACING_SIZE)
            header_sep_lst[0] += "-" * padding + (" " * SPACING_SIZE)
            line_length_lst[0] += padding + SPACING_SIZE

        add_column(handler_column_width, text_dir="<")
        add_column(event_column_width, text_dir="<")
        for _ in headers[2:]:
            add_column(DEFAULT_COLUMN_WIDTH)

        row_format = row_format_lst[0]
        header_sep = header_sep_lst[0]

        result = []

        def append(s: str) -> None:
            result.append(s)
            result.append("\n")

        result.append("\n")
        append(header_sep)
        append(row_format.format(*headers))
        append(header_sep)
        for row in results[:-3]:
            # format min/idx and max/idx
            row[3] = "{}/{}".format(*row[3])  # type: ignore[misc]
            row[4] = "{}/{}".format(*row[4])  # type: ignore[misc]
            append(row_format.format(*row))

        append(header_sep)
        # print total handlers time row
        append(row_format.format(*results[-3]))
        append(header_sep)

        summary_format = "{} took total {}s [min/index: {}, max/index: {}, mean: {}s, std: {}s]"
        for row in results[-2:]:
            row[3] = "{}s/{}".format(*row[3])  # type: ignore[misc]
            row[4] = "{}s/{}".format(*row[4])  # type: ignore[misc]
            del row[1]
            append(summary_format.format(*row))
        print("".join(result))
versionadded:: 0.4.6 \"\"\"", "Events.COMPLETED: {COMPLETED_names} {COMPLETED} \"\"\".format( processing_stats=odict_to_str(results[\"processing_stats\"]), dataflow_stats=odict_to_str(results[\"dataflow_stats\"]), **others, ) print(output_message) return", "k, v in results[\"event_handlers_stats\"].items() } others.update(results[\"event_handlers_names\"]) output_message = \"\"\" ----------------------------------------------------", "Events.STARTED: [] 0.00000 - Events.EPOCH_STARTED: [] 0.00006 | 0.00000/0 |", "for h in self.event_handlers_times[e]: headers.append(f\"{h} ({getattr(e, 'name', str(e))})\") cols.append(torch.tensor(self.event_handlers_times[e][h], dtype=torch.float32))", "\"-\" * padding + (\" \" * SPACING_SIZE) line_length_lst[0] +=", "[ self._get_callable_name(h) for (h, _, _) in engine._event_handlers[e] if not", "# type: Union[int, torch.Tensor] event_handlers_stats = dict( [ (str(e.name).replace(\".\", \"_\"),", "), ] ) def write_results(self, output_path: str) -> None: \"\"\"", "| 0.11529/0 | 0.14977/13 | 0.12893 | 0.00790 - Events.COMPLETED:", "['TerminateOnNan'] 0.20037 | 0.00006/866 | 0.00089/1943 | 0.00010 | 0.00003", "revert back to original handler after profiling setattr(_timeit_handler, \"_profiler_original\", handler)", "@trainer.on(Events.EPOCH_COMPLETED) def log_intermediate_results(): profiler.print_results(profiler.get_results()) trainer.run(dataloader, max_epochs=3) profiler.write_results('path_to_dir/time_profiling.csv') .. versionadded:: 0.4.6", "self.max_epochs * num_iters_per_epoch self._reset(self.max_epochs, self.total_num_iters) self.event_handlers_names = { e: [", "+= \"-\" * padding + (\" \" * SPACING_SIZE) line_length_lst[0]", "results from the profiler Args: results: the aggregated results from", "it profiler = HandlersTimeProfiler() profiler.attach(trainer) @trainer.on(Events.EPOCH_COMPLETED) def log_intermediate_results(): profiler.print_results(profiler.get_results()) trainer.run(dataloader,", "text ----------------------------------------- ----------------------- -------------- ... Handler Event Name Total(s) -----------------------------------------", ".. code-block:: text ----------------------------------------------------------------- # processing_stats dataflow_stats training.<locals>.log_elapsed_time (EPOCH_COMPLETED) ...", "append(header_sep) append(row_format.format(*headers)) append(header_sep) for row in results[:-3]: # format min/idx", "time if the handler was attached to event with event", "self.event_handlers_names = { e: [ self._get_callable_name(h) for (h, _, _)", "event_batch_started = self.event_handlers_times[Events.GET_BATCH_STARTED] event_batch_completed = self.event_handlers_times[Events.GET_BATCH_COMPLETED] results_dump = torch.stack( [", "a list because nonlocal is Py3 only... 
SPACING_SIZE = 2", "- Events.COMPLETED: [] not yet triggered \"\"\" def to_str(v: Union[str,", "total_eh_time)] # type: ignore[list-item] ) return OrderedDict( [ (\"processing_stats\", self._compute_basic_stats(self.processing_times)),", "handler return getattr(handler, \"__qualname__\", handler.__class__.__name__) def _create_wrapped_handler(self, handler: Callable, event:", "go self._event_handlers_timer.reset() def _as_last_started(self, engine: Engine) -> None: self.event_handlers_times[Events.STARTED][0] =", "the profiler Args: results: the aggregated results from the profiler", "4 # type: ignore[arg-type] DEFAULT_COLUMN_WIDTH = 14 headers = [", "self.event_handlers_times[Events.EPOCH_COMPLETED][e] = t def _as_first_completed(self, engine: Engine) -> None: self._event_handlers_timer.reset()", "0.01258 Dataflow: 6.11384 | 0.00008/1935 | 0.28461/1551 | 0.00300 |", "get name of the callable handler return getattr(handler, \"__qualname__\", handler.__class__.__name__)", "----------------------------------------------------------------- # processing_stats dataflow_stats training.<locals>.log_elapsed_time (EPOCH_COMPLETED) ... 1 0.00003 0.252387", "def _as_first_iter_started(self, engine: Engine) -> None: self._event_handlers_timer.reset() def _as_last_iter_started(self, engine:", "handler.__class__.__name__) def _create_wrapped_handler(self, handler: Callable, event: EventEnum) -> Callable: @functools.wraps(handler)", "self._event_handlers_timer = Timer() self.dataflow_times = [] # type: List[float] self.processing_times", "0.00006/866 | 0.00089/1943 | 0.00010 | 0.00003 - Events.EPOCH_COMPLETED: ['empty_cuda_cache',", "as files\") processing_stats = torch.tensor(self.processing_times, dtype=torch.float32) dataflow_stats = torch.tensor(self.dataflow_times, dtype=torch.float32)", "kwargs) def _as_first_started(self, engine: Engine) -> None: # wraps original", "= BasicTimeProfiler() profiler.attach(trainer) @trainer.on(Events.EPOCH_COMPLETED) def log_intermediate_results(): profiler.print_results(profiler.get_results()) trainer.run(dataloader, max_epochs=3) profiler.write_results('path_to_dir/time_profiling.csv')", "type: ignore[list-item] ) return OrderedDict( [ (\"processing_stats\", self._compute_basic_stats(self.processing_times)), (\"dataflow_stats\", self._compute_basic_stats(self.dataflow_times)),", "engine to it profiler = BasicTimeProfiler() profiler.attach(trainer) @trainer.on(Events.EPOCH_COMPLETED) def log_intermediate_results():", "\"\"\" Method to fetch the aggregated profiler results after the", "return any(n in repr(handler) for n in [\"HandlersTimeProfiler.\", \"Timer.\"]) def", "\"processing_stats\", \"dataflow_stats\", \"Event_STARTED\", \"Event_COMPLETED\", \"Event_EPOCH_STARTED\", \"Event_EPOCH_COMPLETED\", \"Event_ITERATION_STARTED\", \"Event_ITERATION_COMPLETED\", \"Event_GET_BATCH_STARTED\", \"Event_GET_BATCH_COMPLETED\",", "row[3] = \"{}/{}\".format(*row[3]) # type: ignore[misc] row[4] = \"{}/{}\".format(*row[4]) #", "Events.ITERATION_COMPLETED: {ITERATION_COMPLETED_names} {ITERATION_COMPLETED} - Events.EPOCH_COMPLETED: {EPOCH_COMPLETED_names} {EPOCH_COMPLETED} - Events.COMPLETED: {COMPLETED_names}", "h, getattr(e, \"name\", str(e)), *compute_basic_stats(torch.tensor(self.event_handlers_times[e][h], dtype=torch.float32)), ] for e in", "for profiling self.event_handlers_names = { e: [ self._get_callable_name(h) for (h,", "self.processing_times dataflow_stats = self.dataflow_times event_started = 
self.event_handlers_times[Events.STARTED].repeat_interleave(self.total_num_iters) event_completed = self.event_handlers_times[Events.COMPLETED].repeat_interleave(self.total_num_iters)", "if e not in self.events_to_ignore] ) # type: Union[int, torch.Tensor]", "{})) # revert back the wrapped handlers with original handlers", "1 self.event_handlers_times[Events.GET_BATCH_COMPLETED][i] = t d = self._dataflow_timer.value() self.dataflow_times[i] = d", "return v elif isinstance(v, tuple): return f\"{v[0]:.5f}/{v[1]}\" return f\"{v:.5f}\" def", "Any, Callable, Dict, List, Mapping, Sequence, Tuple, Union, cast import", "Timer() self._event_handlers_timer = Timer() self.dataflow_times = torch.zeros(1) self.processing_times = torch.zeros(1)", "self._processing_timer.value() self.processing_times.append(t) def _timeit_dataflow(self) -> None: # handler used for", "torch.zeros(num_epochs), Events.ITERATION_STARTED: torch.zeros(total_num_iters), Events.ITERATION_COMPLETED: torch.zeros(total_num_iters), Events.GET_BATCH_COMPLETED: torch.zeros(total_num_iters), Events.GET_BATCH_STARTED: torch.zeros(total_num_iters), }", "mean = \"None\" # type: Union[str, float] std = \"None\"", "handlers for e in engine._event_handlers: for i, (func, args, kwargs)", "tensors to have same length cols = [torch.nn.functional.pad(x, pad=(0, max_len", "but given {type(engine)}\") if not engine.has_event_handler(self._as_first_started): engine._event_handlers[Events.STARTED].insert(0, (self._as_first_started, (engine,), {}))", "Examples: .. code-block:: text ----------------------------------------- ----------------------- -------------- ... Handler Event", "Tuple[Union[str, float], Union[str, float]] mean = \"None\" # type: Union[str,", "\" * SPACING_SIZE) header_sep_lst[0] += \"-\" * padding + (\"", "= [ self._as_last_epoch_started, self._as_last_epoch_completed, self._as_last_iter_started, self._as_last_iter_completed, self._as_last_get_batch_started, self._as_last_get_batch_completed, self._as_last_completed, ]", "max/index | mean | std Processing function: 157.46292 | 0.01452/1501", "results]) + 4 # type: ignore[arg-type] event_column_width = max([len(item[1]) for", "\"\"\" BasicTimeProfiler can be used to profile the handlers, events,", "m in zip(self._events, self._fmethods): engine.remove_event_handler(m, e) for e, m in", "_as_first_epoch_started(self, engine: Engine) -> None: self._event_handlers_timer.reset() def _as_last_epoch_started(self, engine: Engine)", "torch.tensor(self.processing_times, dtype=torch.float32) dataflow_stats = torch.tensor(self.dataflow_times, dtype=torch.float32) cols = [processing_stats, dataflow_stats]", "self._processing_timer = Timer() self._event_handlers_timer = Timer() self.dataflow_times = torch.zeros(1) self.processing_times", "# required to revert back to original handler after profiling", "add_column(handler_column_width, text_dir=\"<\") add_column(event_column_width, text_dir=\"<\") for _ in headers[2:]: add_column(DEFAULT_COLUMN_WIDTH) row_format", "ignite.handlers import HandlersTimeProfiler trainer = Engine(train_updater) # Create an object", "str) -> None: result.append(s) result.append(\"\\n\") result.append(\"\\n\") append(header_sep) append(row_format.format(*headers)) append(header_sep) for", "append(s: str) -> None: result.append(s) result.append(\"\\n\") result.append(\"\\n\") append(header_sep) append(row_format.format(*headers)) append(header_sep)", "columns=headers) results_df.to_csv(output_path, index=False) @staticmethod def print_results(results: 
List[List[Union[str, float]]]) -> None:", "implementation of torch.autograd.profiler.build_table handler_column_width = max([len(item[0]) for item in results])", "mean: {}s, std: {}s]\" for row in results[-2:]: row[3] =", "code-block:: python from ignite.handlers import BasicTimeProfiler trainer = Engine(train_updater) #", "| 0.28461/1551 | 0.00300 | 0.02693 Event handlers: 2.82721 -", "total 16.24365s [min/index: 0.00533s/1874, max/index: 0.01129s/937, mean: 0.00866s, std: 0.00113s]", "+ text_dir + str(padding) + \"}\" + (\" \" *", "to have same length cols = [torch.nn.functional.pad(x, pad=(0, max_len -", "engine.has_event_handler(self._as_first_started): engine._event_handlers[Events.STARTED].insert(0, (self._as_first_started, (engine,), {})) def get_results(self) -> List[List[Union[str, float]]]:", "(str(e.name).replace(\".\", \"_\"), self._compute_basic_stats(self.event_handlers_times[e])) for e in Events if e not", "results = profiler.get_results() \"\"\" total_eh_time = sum( [(self.event_handlers_times[e]).sum() for e", "\"epoch\", \"iteration\", \"processing_stats\", \"dataflow_stats\", \"Event_STARTED\", \"Event_COMPLETED\", \"Event_EPOCH_STARTED\", \"Event_EPOCH_COMPLETED\", \"Event_ITERATION_STARTED\", \"Event_ITERATION_COMPLETED\",", "BasicTimeProfiler() profiler.attach(trainer) @trainer.on(Events.EPOCH_COMPLETED) def log_intermediate_results(): profiler.print_results(profiler.get_results()) trainer.run(dataloader, max_epochs=3) profiler.write_results('path_to_dir/time_profiling.csv') ..", "Event_STARTED ... 1.0 1.0 0.00003 0.252387 0.125676 1.0 2.0 0.00029", "(\"mean\", torch.mean(data).item()), (\"std\", torch.std(data).item()), ] return OrderedDict(out) def get_results(self) ->", "sum(self.event_handlers_times[e][h]) for e in self.event_handlers_times for h in self.event_handlers_times[e] ]", "= \"\"\" ---------------------------------------------------- | Time profiling stats (in seconds): |", "because nonlocal is Py3 only... SPACING_SIZE = 2 row_format_lst =", "'training.<locals>.log_elapsed_time', ] 2.57860 | 0.11529/0 | 0.14977/13 | 0.12893 |", "] for e in Events if e not in self.events_to_ignore", "times. Custom events are also profiled by this profiler Examples:", "zip(self._events, self._lmethods): engine.remove_event_handler(m, e) def attach(self, engine: Engine) -> None:", "BasicTimeProfiler can be used to profile the handlers, events, data", "0.00008/1935 | 0.28461/1551 | 0.00300 | 0.02693 Event handlers: 2.82721", "\"{} took total {}s [min/index: {}, max/index: {}, mean: {}s,", "self._processing_timer = Timer() self._event_handlers_timer = Timer() self.dataflow_times = [] #", "by this profiler Examples: .. 
code-block:: python from ignite.handlers import", "torch.zeros(total_num_iters), Events.GET_BATCH_STARTED: torch.zeros(total_num_iters), } def _as_first_started(self, engine: Engine) -> None:", "\"\"]) event_handler_stats.append([\"Processing\", \"None\", *compute_basic_stats(self.processing_times)]) event_handler_stats.append([\"Dataflow\", \"None\", *compute_basic_stats(self.dataflow_times)]) return event_handler_stats def", "[] not yet triggered \"\"\" def to_str(v: Union[str, tuple]) ->", "bool: # checks whether the handler is internal return any(n", "return f\"{v[0]:.5f}/{v[1]}\" return f\"{v:.5f}\" def odict_to_str(d: Mapping) -> str: out", "= self._event_handlers_timer.value() # Remove added handlers: engine.remove_event_handler(self._as_last_started, Events.STARTED) for e,", "row_format_lst[0] header_sep = header_sep_lst[0] result = [] def append(s: str)", "times. Examples: .. code-block:: python from ignite.handlers import BasicTimeProfiler trainer", "k: odict_to_str(v) if isinstance(v, OrderedDict) else v for k, v", "----------------------- -------------- Processing took total 11.29543s [min/index: 0.00393s/1875, max/index: 0.00784s/0,", "hasattr(handler, \"_parent\") or t >= self.EVENT_FILTER_THESHOLD_TIME: self.event_handlers_times[event][hname].append(t) # required to", "EPOCH_COMPLETED 0.00106 run.<locals>.log_training_loss ITERATION_COMPLETED 0.059 run.<locals>.log_time COMPLETED not triggered -----------------------------------------", "output_message = \"\"\" ---------------------------------------------------- | Time profiling stats (in seconds):", "added handlers: engine.remove_event_handler(self._as_last_started, Events.STARTED) for e, m in zip(self._events, self._fmethods):", "t = self._event_handlers_timer.value() e = engine.state.epoch - 1 self.event_handlers_times[Events.EPOCH_STARTED][e] =", "on non-zero data: data = data[data > 0] total =", "0.125676 2 0.00029 0.252342 0.125123 \"\"\" try: import pandas as", "self.processing_times[i] = t self._event_handlers_timer.reset() def _as_last_iter_completed(self, engine: Engine) -> None:", "for e in Events if e not in self.events_to_ignore ]", "torch.zeros(total_num_iters), Events.GET_BATCH_COMPLETED: torch.zeros(total_num_iters), Events.GET_BATCH_STARTED: torch.zeros(total_num_iters), } def _as_first_started(self, engine: Engine)", "= \"{}/{}\".format(*row[4]) # type: ignore[misc] append(row_format.format(*row)) append(header_sep) # print total", "original handlers for profiling self.event_handlers_names = { e: [ self._get_callable_name(h)", "type: Union[str, float] min_index = (\"None\", \"None\") # type: Tuple[Union[str,", "-------------- Processing took total 11.29543s [min/index: 0.00393s/1875, max/index: 0.00784s/0, mean:", "seconds): | ---------------------------------------------------- total | min/index | max/index | mean", "other handlers: engine._event_handlers[Events.STARTED].append((self._as_last_started, (engine,), {})) for e, m in zip(self._events,", "in zip(self._events, self._fmethods): engine.remove_event_handler(m, e) for e, m in zip(self._events,", "min_index, max_index, mean, std] event_handler_stats = [ [ h, getattr(e,", "fix this.\" ) num_iters_per_epoch = engine.state.epoch_length self.max_epochs = cast(int, engine.state.max_epochs)", "import Timer class BasicTimeProfiler: \"\"\" BasicTimeProfiler can be used to", "\"\"\" total_eh_time = sum( [(self.event_handlers_times[e]).sum() for e in Events if", "[] def append(s: str) -> None: result.append(s) result.append(\"\\n\") result.append(\"\\n\") 
append(header_sep)", "engine: Engine) -> None: t = self._event_handlers_timer.value() e = engine.state.epoch", "Callable, Dict, List, Mapping, Sequence, Tuple, Union, cast import torch", "not in self.events_to_ignore] ) # type: Union[int, torch.Tensor] event_handlers_stats =", "engine: Engine) -> None: \"\"\"Attach HandlersTimeProfiler to the given engine.", "function: 157.46292 | 0.01452/1501 | 0.26905/0 | 0.07730 | 0.01258", "(), {})) # dataflow timer engine.add_event_handler(Events.GET_BATCH_STARTED, self._dataflow_timer.reset) engine._event_handlers[Events.GET_BATCH_COMPLETED].insert(0, (self._timeit_dataflow, (),", "_as_last_epoch_completed(self, engine: Engine) -> None: t = self._event_handlers_timer.value() e =", "a filename .. code-block:: python profiler.write_results('path_to_dir/awesome_filename.csv') Examples: .. code-block:: text", "torch.Tensor] self._events = [ Events.EPOCH_STARTED, Events.EPOCH_COMPLETED, Events.ITERATION_STARTED, Events.ITERATION_COMPLETED, Events.GET_BATCH_STARTED, Events.GET_BATCH_COMPLETED,", "| 0.00000/17 | 0.00000 | 0.00000 - Events.ITERATION_STARTED: ['PiecewiseLinear'] 0.03482", "engine: Engine) -> None: self.event_handlers_times[Events.COMPLETED][0] = self._event_handlers_timer.value() # Remove added", "| 0.00003 - Events.EPOCH_COMPLETED: ['empty_cuda_cache', 'training.<locals>.log_elapsed_time', ] 2.57860 | 0.11529/0", "0.00003 0.252387 0.125676 1.0 2.0 0.00029 0.252342 0.125123 \"\"\" try:", "-> Dict[str, Union[str, float, Tuple[Union[float], Union[float]]]]: # compute on non-zero", "Timer() self.dataflow_times = torch.zeros(1) self.processing_times = torch.zeros(1) self.event_handlers_times = {}", "if hasattr(func, \"_profiler_original\"): engine._event_handlers[e][i] = (func._profiler_original, args, kwargs) def _as_first_started(self,", "\"\"\" events_to_ignore = [ Events.EXCEPTION_RAISED, Events.TERMINATE, Events.TERMINATE_SINGLE_EPOCH, Events.DATALOADER_STOP_ITERATION, ] def", "{h: [] for h in event_handlers_names[e]} for e in event_handlers_names}", "-------------- run.<locals>.log_training_results EPOCH_COMPLETED 19.43245 run.<locals>.log_validation_results EPOCH_COMPLETED 2.55271 run.<locals>.log_time EPOCH_COMPLETED 0.00049", "None: # handler used for profiling dataflow times t =", "2 row_format_lst = [\"\"] header_sep_lst = [\"\"] line_length_lst = [-SPACING_SIZE]", "header_sep_lst[0] += \"-\" * padding + (\" \" * SPACING_SIZE)", "Events.EPOCH_STARTED: torch.zeros(num_epochs), Events.EPOCH_COMPLETED: torch.zeros(num_epochs), Events.ITERATION_STARTED: torch.zeros(total_num_iters), Events.ITERATION_COMPLETED: torch.zeros(total_num_iters), Events.GET_BATCH_COMPLETED: torch.zeros(total_num_iters),", "profiling processing times t = self._processing_timer.value() self.processing_times.append(t) def _timeit_dataflow(self) ->", "float], Union[str, float]] mean = \"None\" # type: Union[str, float]", "triggered ----------------------------------------- ----------------------- -------------- Total 22.04571 ----------------------------------------- ----------------------- -------------- Processing", "data = torch.as_tensor(times, dtype=torch.float32) # compute on non-zero data: data", "# get name of the callable handler return getattr(handler, \"__qualname__\",", "the profiler and attach an engine to it profiler =", "\"_\"), self._compute_basic_stats(self.event_handlers_times[e])) for e in Events if e not in", "profiler = HandlersTimeProfiler() profiler.attach(trainer) @trainer.on(Events.EPOCH_COMPLETED) def log_intermediate_results(): 
profiler.print_results(profiler.get_results()) trainer.run(dataloader, max_epochs=3)", "----------------------------------------- ----------------------- -------------- run.<locals>.log_training_results EPOCH_COMPLETED 19.43245 run.<locals>.log_validation_results EPOCH_COMPLETED 2.55271 run.<locals>.log_time", "value=0) for x in cols] results_dump = torch.stack(cols, dim=1).numpy() results_df", "def _as_first_started(self, engine: Engine) -> None: # wraps original handlers", "dict( [ (str(e.name).replace(\".\", \"_\"), self._compute_basic_stats(self.event_handlers_times[e])) for e in Events if", "\"iteration\", \"processing_stats\", \"dataflow_stats\", \"Event_STARTED\", \"Event_COMPLETED\", \"Event_EPOCH_STARTED\", \"Event_EPOCH_COMPLETED\", \"Event_ITERATION_STARTED\", \"Event_ITERATION_COMPLETED\", \"Event_GET_BATCH_STARTED\",", "hasattr(engine.state.dataloader, \"__len__\"): num_iters_per_epoch = len(engine.state.dataloader) # type: ignore[arg-type] else: if", "e, m in zip(self._events, self._lmethods): engine._event_handlers[e].append((m, (engine,), {})) # Let's", "} # Setup all other handlers: engine._event_handlers[Events.STARTED].append((self._as_last_started, (engine,), {})) for", "self.dataflow_times = [] # type: List[float] self.processing_times = [] #", ".. code-block:: python profiler.write_results('path_to_dir/awesome_filename.csv') Examples: .. code-block:: text ----------------------------------------------------------------- epoch", "| 0.26905/0 | 0.07730 | 0.01258 Dataflow: 6.11384 | 0.00008/1935", "{ITERATION_COMPLETED_names} {ITERATION_COMPLETED} - Events.EPOCH_COMPLETED: {EPOCH_COMPLETED_names} {EPOCH_COMPLETED} - Events.COMPLETED: {COMPLETED_names} {COMPLETED}", "data=results_dump, columns=[ \"epoch\", \"iteration\", \"processing_stats\", \"dataflow_stats\", \"Event_STARTED\", \"Event_COMPLETED\", \"Event_EPOCH_STARTED\", \"Event_EPOCH_COMPLETED\",", "self._event_handlers_timer.value() hname = self._get_callable_name(handler) # filter profiled time if the", "= \"{}/{}\".format(*row[3]) # type: ignore[misc] row[4] = \"{}/{}\".format(*row[4]) # type:", "max([x.numel() for x in cols]) count_col = torch.arange(max_len, dtype=torch.float32) +", "| 0.00300 | 0.02693 Event handlers: 2.82721 - Events.STARTED: []", "data: data = data[data > 0] total = round(torch.sum(data).item(), 5)", "engine: Engine) -> None: \"\"\"Attach BasicTimeProfiler to the given engine.", "] + [(\"total_time\", total_eh_time)] # type: ignore[list-item] ) return OrderedDict(", "trainer.run(dataloader, max_epochs=3) profiler.write_results('path_to_dir/time_profiling.csv') .. versionadded:: 0.4.6 \"\"\" events_to_ignore = [", "EPOCH_COMPLETED 2.55271 run.<locals>.log_time EPOCH_COMPLETED 0.00049 run.<locals>.log_intermediate_results EPOCH_COMPLETED 0.00106 run.<locals>.log_training_loss ITERATION_COMPLETED", "\"\"\" ---------------------------------------------------- | Time profiling stats (in seconds): | ----------------------------------------------------", "OrderedDict(out) def get_results(self) -> Dict[str, Dict[str, Any]]: \"\"\" Method to", "= max([x.numel() for x in cols]) count_col = torch.arange(max_len, dtype=torch.float32)", "Tuple[Union[str, float], Union[str, float]]]]: data = torch.as_tensor(times, dtype=torch.float32) # compute", "if len(data) > 0 else \"not triggered\" # type: Union[str,", "Engine) -> None: \"\"\"Attach HandlersTimeProfiler to the given engine. 
Args:", "triggered \"\"\" def to_str(v: Union[str, tuple]) -> str: if isinstance(v,", "dim=1, ).numpy() results_df = pd.DataFrame( data=results_dump, columns=[ \"epoch\", \"iteration\", \"processing_stats\",", "used for profiling dataflow times t = self._dataflow_timer.value() self.dataflow_times.append(t) def", "to the given engine. Args: engine: the instance of Engine", "HandlersTimeProfiler: \"\"\" HandlersTimeProfiler can be used to profile the handlers,", "if \"BasicTimeProfiler.\" not in repr(h) # avoid adding internal handlers", "not isinstance(engine, Engine): raise TypeError(f\"Argument engine should be ignite.engine.Engine, but", "if e not in self.events_to_ignore ] + [(\"total_time\", total_eh_time)] #", "for (h, _, _) in engine._event_handlers[e] if \"BasicTimeProfiler.\" not in", "t = self._event_handlers_timer.value() hname = self._get_callable_name(handler) # filter profiled time", "in d.values()]) return out others = { k: odict_to_str(v) if", "count_col) headers.insert(0, \"#\") # pad all tensors to have same", "in headers[2:]: add_column(DEFAULT_COLUMN_WIDTH) row_format = row_format_lst[0] header_sep = header_sep_lst[0] result", "handlers into output ] for e in Events if e", "0.252342 0.125123 \"\"\" try: import pandas as pd except ImportError:", "torch.zeros(total_num_iters), } def _as_first_started(self, engine: Engine) -> None: if hasattr(engine.state.dataloader,", "[ [ h, getattr(e, \"name\", str(e)), *compute_basic_stats(torch.tensor(self.event_handlers_times[e][h], dtype=torch.float32)), ] for", "self.event_handlers_times[Events.STARTED].repeat_interleave(self.total_num_iters) event_completed = self.event_handlers_times[Events.COMPLETED].repeat_interleave(self.total_num_iters) event_epoch_started = self.event_handlers_times[Events.EPOCH_STARTED].repeat_interleave(iters_per_epoch) event_epoch_completed = self.event_handlers_times[Events.EPOCH_COMPLETED].repeat_interleave(iters_per_epoch)", "None: # reverts handlers to original handlers for e in", "elif isinstance(v, tuple): return f\"{v[0]:.5f}/{v[1]}\" return f\"{v:.5f}\" def odict_to_str(d: Mapping)", "float]]]: \"\"\" Method to fetch the aggregated profiler results after", "type: Tuple[Union[str, float], Union[str, float]] max_index = (\"None\", \"None\") #", "] 2.57860 | 0.11529/0 | 0.14977/13 | 0.12893 | 0.00790", "return [total, min_index, max_index, mean, std] event_handler_stats = [ [", "if not hasattr(handler, \"_parent\") or t >= self.EVENT_FILTER_THESHOLD_TIME: self.event_handlers_times[event][hname].append(t) #", "not engine.has_event_handler(self._as_first_started): engine._event_handlers[Events.STARTED].insert(0, (self._as_first_started, (engine,), {})) def get_results(self) -> List[List[Union[str,", "str: out = \" | \".join([to_str(v) for v in d.values()])", "Engine): raise TypeError(f\"Argument engine should be ignite.engine.Engine, but given {type(engine)}\")", "{ e: [ h.__qualname__ if hasattr(h, \"__qualname__\") else h.__class__.__name__ for", "code-block:: text ----------------------------------------- ----------------------- -------------- ... 
Handler Event Name Total(s)", "append(row_format.format(*results[-3])) append(header_sep) summary_format = \"{} took total {}s [min/index: {},", "this case.\" \"Please, set trainer.run(..., epoch_length=epoch_length) in order to fix", "event_handlers_names: Mapping[EventEnum, List[str]]) -> None: # reset the variables used", "= self._processing_timer.value() self.processing_times.append(t) def _timeit_dataflow(self) -> None: # handler used", "event_started, event_completed, event_epoch_started, event_epoch_completed, event_iter_started, event_iter_completed, event_batch_started, event_batch_completed, ], dim=1,", "attach(self, engine: Engine) -> None: \"\"\"Attach BasicTimeProfiler to the given", "# type: Tuple[Union[str, float], Union[str, float]] max_index = (\"None\", \"None\")", "processing_stats, dataflow_stats, event_started, event_completed, event_epoch_started, event_epoch_completed, event_iter_started, event_iter_completed, event_batch_started, event_batch_completed,", "ignite.handlers.timing import Timer class BasicTimeProfiler: \"\"\" BasicTimeProfiler can be used", "] for e in self.event_handlers_times for h in self.event_handlers_times[e] ]", "headers[2:]: add_column(DEFAULT_COLUMN_WIDTH) row_format = row_format_lst[0] header_sep = header_sep_lst[0] result =", "Engine(train_updater) # Create an object of the profiler and attach", "else \"not yet triggered\") ] # type: List[Tuple[str, Union[str, float,", "f\"{v[0]:.5f}/{v[1]}\" return f\"{v:.5f}\" def odict_to_str(d: Mapping) -> str: out =", "0.01129s/937, mean: 0.00866s, std: 0.00113s] \"\"\" # adopted implementation of", "\"\"\".format( processing_stats=odict_to_str(results[\"processing_stats\"]), dataflow_stats=odict_to_str(results[\"dataflow_stats\"]), **others, ) print(output_message) return output_message class HandlersTimeProfiler:", "handlers to original handlers for e in engine._event_handlers: for i,", "else h.__class__.__name__ for (h, _, _) in engine._event_handlers[e] if \"BasicTimeProfiler.\"", "event_iter_started = self.event_handlers_times[Events.ITERATION_STARTED] event_iter_completed = self.event_handlers_times[Events.ITERATION_COMPLETED] event_batch_started = self.event_handlers_times[Events.GET_BATCH_STARTED] event_batch_completed", "back the wrapped handlers with original handlers at the end", "Timer() self._processing_timer = Timer() self._event_handlers_timer = Timer() self.dataflow_times = torch.zeros(1)", "out others = { k: odict_to_str(v) if isinstance(v, OrderedDict) else", "v in d.values()]) return out others = { k: odict_to_str(v)", "original handler after profiling setattr(_timeit_handler, \"_profiler_original\", handler) return _timeit_handler def", "i = engine.state.iteration - 1 self.event_handlers_times[Events.GET_BATCH_STARTED][i] = t def _as_first_get_batch_completed(self,", "def to_str(v: Union[str, tuple]) -> str: if isinstance(v, str): return", "row append(row_format.format(*results[-3])) append(header_sep) summary_format = \"{} took total {}s [min/index:", "run .. code-block:: python results = profiler.get_results() \"\"\" total_eh_time =", "profile the handlers, events, data loading and data processing times.", "# type: Tuple[Union[str, float], Union[str, float]] mean = \"None\" #", "if len(data) > 0 else \"not yet triggered\") ] #", "for e, m in zip(self._events, self._fmethods): engine.remove_event_handler(m, e) for e,", "import BasicTimeProfiler trainer = Engine(train_updater) # Create an object of", "profiler.write_results('path_to_dir/awesome_filename.csv') Examples: .. 
code-block:: text ----------------------------------------------------------------- epoch iteration processing_stats dataflow_stats", "[ (str(e.name).replace(\".\", \"_\"), self._compute_basic_stats(self.event_handlers_times[e])) for e in Events if e", "else: if engine.state.epoch_length is None: raise ValueError( \"As epoch_length is", "not use BasicTimeProfiler in this case.\" \"Please, set trainer.run(..., epoch_length=epoch_length)", "-> str: \"\"\" Method to print the aggregated results from", "0.03482 | 0.00001/188 | 0.00018/679 | 0.00002 | 0.00001 -", "[torch.nn.functional.pad(x, pad=(0, max_len - x.numel()), mode=\"constant\", value=0) for x in", "engine._allowed_events } self._reset(self.event_handlers_names) for e in engine._allowed_events: for i, (func,", "\"None\" # type: Union[str, float] std = \"None\" # type:", "self.event_handlers_times = {} # type: Dict[EventEnum, Dict[str, List[float]]] @staticmethod def", "is not set, we can not use BasicTimeProfiler in this", "profiler.print_results(results) Examples: .. code-block:: text ----------------------------------------- ----------------------- -------------- ... Handler", "(torch.max(data).item(), torch.argmax(data).item())), (\"mean\", torch.mean(data).item()), (\"std\", torch.std(data).item()), ] return OrderedDict(out) def", "args, kwargs) def _as_first_started(self, engine: Engine) -> None: # wraps", "-> None: # handler used for profiling processing times t", "original handlers at the end engine.add_event_handler(Events.COMPLETED, self._detach_profiler_handlers) def attach(self, engine:", "write results as files\") processing_stats = torch.tensor(self.processing_times, dtype=torch.float32) dataflow_stats =", "compute on non-zero data: data = data[data > 0] out", "= engine.state.iteration - 1 self.event_handlers_times[Events.ITERATION_STARTED][i] = t self._processing_timer.reset() def _as_first_iter_completed(self,", "\"\"\"Attach BasicTimeProfiler to the given engine. 
Args: engine: the instance", "t = self._event_handlers_timer.value() i = engine.state.iteration - 1 self.event_handlers_times[Events.ITERATION_COMPLETED][i] =", "in self.event_handlers_times[e] ] event_handler_stats.append([\"Total\", \"\", total_eh_time, \"\", \"\", \"\", \"\"])", "e, v in self.event_handlers_names.items()}, ), ] ) def write_results(self, output_path:", "[ sum(self.event_handlers_times[e][h]) for e in self.event_handlers_times for h in self.event_handlers_times[e]", "+ \"}\" + (\" \" * SPACING_SIZE) header_sep_lst[0] += \"-\"", "all other handlers: engine._event_handlers[Events.STARTED].append((self._as_last_started, (engine,), {})) for e, m in", "def _is_internal_handler(handler: Callable) -> bool: # checks whether the handler", "self.dataflow_times = torch.zeros(total_num_iters) self.processing_times = torch.zeros(total_num_iters) self.event_handlers_times = { Events.STARTED:", "reverts handlers to original handlers for e in engine._event_handlers: for", "be used to profile the handlers, events, data loading and", "for profiling dataflow times t = self._dataflow_timer.value() self.dataflow_times.append(t) def _reset(self,", "= [ [ h, getattr(e, \"name\", str(e)), *compute_basic_stats(torch.tensor(self.event_handlers_times[e][h], dtype=torch.float32)), ]", "round(torch.std(data).item(), 5) return [total, min_index, max_index, mean, std] event_handler_stats =", ") total_eh_time = round(float(total_eh_time), 5) def compute_basic_stats( times: Union[Sequence, torch.Tensor]", "from the profiler Args: results: the aggregated results from the", "= engine.state.iteration - 1 self.event_handlers_times[Events.GET_BATCH_COMPLETED][i] = t d = self._dataflow_timer.value()", "= [ \"Handler\", \"Event Name\", \"Total(s)\", \"Min(s)/IDX\", \"Max(s)/IDX\", \"Mean(s)\", \"Std(s)\",", "self._events = [ Events.EPOCH_STARTED, Events.EPOCH_COMPLETED, Events.ITERATION_STARTED, Events.ITERATION_COMPLETED, Events.GET_BATCH_STARTED, Events.GET_BATCH_COMPLETED, Events.COMPLETED," ]
[ "cycles DONE ! {datetime.now()}\") await asyncio.sleep(5) finally: # in every", "for v in pmatrix_neglog.df.columns: # vertex source if v in", "get: base_curr -- (sell_price - fee) --> quote_curr def __getitem__(self,", "asyncio.sleep(5) finally: # in every case cancel the background task", "the actual pair pair = properpairs[p] fee = pair.fees[0].get('fee') #", "toolbar_location=None) # # graph = from_networkx(G, nx.spring_layout, scale=2, center=(0, 0))", "bid_price: Decimal, fee_pct: Decimal): if self.assets is None: # retrieve", "negative weight sum (cycle need to be more than one", "quote_curr -- (buy_price - fee) --> base_curr self.df[base][quote] = ((100", "fees - small data for quick compute) # - websockets", "if v in min_dist.keys(): # otherwise distance infinite until we", "G = nx.from_pandas_adjacency(self.df, create_using=nx.DiGraph) # from bokeh.io import output_file, show", "for w in pmatrix_neglog.df.columns: # vertex target if w not", "upd.pairname.quote fee = properpairs[upd.pairname].fees[0].get('fee') await pmatrix(base=base, quote=quote, ask_price=upd.ask.price, bid_price=upd.bid.price, fee_pct=fee)", "isinstance(assets, Assets): assets = [a for a in assets.values()] self.df", "set(assets[p.base] for p in proper_userpairs.values()) | set(assets[p.quote] for p in", "asyncio.Lock assets: typing.Optional[Assets] def __init__(self, assets: typing.Union[Assets, typing.Iterable[Asset]]): self.wlock =", "in every case cancel the background task now bgtsk.cancel() #", "= (*path, min_pred[path[-1]]) # First cycle retrieved is *likely* (?)", "unify iterable of pairs somehow... properpairs = pairs pairs =", "update pricematrix base = upd.pairname.base quote = upd.pairname.quote fee =", "# NOT implemented ! # # find negative weight sum", "updates if isinstance(pairs, AssetPairs): # TODO : we need to", "await pmatrix(base=pair.base, quote=pair.quote, ask_price=tk.ask.price, bid_price=tk.bid.price, fee_pct=fee) # TODO : 2", "know it... for w in pmatrix_neglog.df.columns: # vertex target if", "copy all values and take -log() for c in self.df.columns:", "len(negcycle): amnt = 1 # arbitrary starting amount pred =", ": # negloggraph = neglog.to_graph() # # negcycle = list()", "- fee) --> base_curr self.df[base][quote] = ((100 - fee_pct)/100) /", "((100 - fee_pct)/100) / ask_price # ask price to get:", "item not in self.df.columns: raise KeyError(f\"{item} not found\") if item", "right fee depending on total traded volume ! await pmatrix(base=pair.base,", "in min_dist.keys() or min_dist[w] > min_dist[v] + pmatrix_neglog[v][w]: min_dist[w] =", "p in pairs]) # TODO : build price matrix for", "\"BTC\"]) pm[\"EUR\"][\"BTC\"] = Decimal(1.234) pm[\"BTC\"][\"EUR\"] = Decimal(4.321) assert pm[\"EUR\"][\"BTC\"] ==", "None async def __call__(self, base: Asset, ask_price: Decimal, quote: Asset,", "np import asyncio import typing from aiokraken.model.assetpair import AssetPair from", "await pmatrix(base=base, quote=quote, ask_price=upd.ask.price, bid_price=upd.bid.price, fee_pct=fee) class PriceMatrix: # Note", "from datetime import datetime from decimal import Decimal from math", "do arbitrage and find cycles... 
df: pd.DataFrame # we also", "-> {amnt} {pred}\" print(f\"ARBITRAGE POSSIBLE: {dscr}\") # TODO : from", "min_pred[w]) while len(set(path)) == len(path): # while no duplicates, cycle", "dtype='float64') self.assets = None async def __call__(self, base: Asset, ask_price:", "for cn in reversed(negcycle[:-1]): amnt = amnt * pmtx[pred][cn] pred", "# TODO : 2 levels : # - slow updates", "w in pmatrix_neglog.df.columns: if min_dist[w] > min_dist[v] + pmatrix_neglog[v][w]: #", "not None and ( p.base in proper_userassets or p.quote in", "since we want to do arbitrage and find cycles... df:", "base = upd.pairname.base quote = upd.pairname.quote fee = properpairs[upd.pairname].fees[0].get('fee') await", "aiokraken.rest import AssetPairs, Assets from aiokraken.model.asset import Asset from aiokraken.rest.client", "PriceMatrix(assets=proper_related_assets) # running ticker updates in background bgtsk = asyncio.create_task(ticker_updates(pairs=proper_userpairs,", "and ( p.base in proper_userassets or p.quote in proper_userassets )})", "pricematrix changes while True: # TODO : efficient TUI lib", "fee_pct=fee) # TODO : 2 levels : # - slow", "in pmatrix_neglog.df.columns: if v in min_dist.keys(): # otherwise node is", "but stored as numpy floats for faster compute self.df[quote][base] =", "retrieving widely related assets related_assets = set(assets[p.base] for p in", "proper_userpairs.values()) proper_related_assets = Assets({a.restname: a for a in related_assets}) pmtx", "return G def test_pricematrix_mapping(): # testing with string for simplicity", "cycles... df: pd.DataFrame # we also need to be careful", "careful that only one writer can modify data at a", "opportunities ?? # Another way : # negloggraph = neglog.to_graph()", "dscr + f\" -> {amnt} {pred}\" print(f\"ARBITRAGE POSSIBLE: {dscr}\") #", "# display... neglog = pmtx.neglog() if neglog: negcycle = bellmanford(neglog)", "cycle retrieved is *likely* (?) to be the minimal one", "if not isinstance(quote, Asset): quote = self.assets[quote].restname # These are", "min_pred[path[-1]]) # First cycle retrieved is *likely* (?) to be", "async for upd in ticker(pairs=pairs, restclient=client): print(f\"wss ==> tick: {upd}\")", "or p.quote in proper_userassets )}) # retrieving widely related assets", "compute) async for upd in ticker(pairs=pairs, restclient=client): print(f\"wss ==> tick:", "# print(negcycle) # print(f\"computing cycles DONE ! {datetime.now()}\") await asyncio.sleep(5)", "running ticker updates in background bgtsk = asyncio.create_task(ticker_updates(pairs=proper_userpairs, pmatrix=pmtx)) try:", "= [a for a in assets.values()] self.df = pd.DataFrame(data={c.restname: {c.restname:", ": 2 levels : # - slow updates with wide", "find negative weight sum (cycle need to be more than", "assets], dtype='float64') self.assets = None async def __call__(self, base: Asset,", "cycle[1:])) < 0: # print(f\"Found one: {cycle}\") # negcycle.append(cycle) #", "[a for a in assets.values()] self.df = pd.DataFrame(data={c.restname: {c.restname: None", "fees - detailed data & precise compute) async for upd", "+ f\" -> {amnt} {pred}\" print(f\"ARBITRAGE POSSIBLE: {dscr}\") # TODO", "await asyncio.sleep(5) finally: # in every case cancel the background", "time... 
wlock: asyncio.Lock assets: typing.Optional[Assets] def __init__(self, assets: typing.Union[Assets, typing.Iterable[Asset]]):", "negcycle[-1] dscr = f\"{amnt} {pred}\" for cn in reversed(negcycle[:-1]): amnt", "amnt = amnt * pmtx[pred][cn] pred = cn dscr =", "return pd.Series(dtype=pd.dtype('decimal')) return self.df[item] def __len__(self): return len(self.df.columns) def __str__(self):", "If we can still relax edges, then we have a", "tkrs.items(): # retrieve the actual pair pair = properpairs[p] fee", "= bellmanford(neglog) if len(negcycle): amnt = 1 # arbitrary starting", "for w in pmatrix_neglog.df.columns: if min_dist[w] > min_dist[v] + pmatrix_neglog[v][w]:", "assets: typing.Union[Assets, typing.Iterable[Asset]]): self.wlock = asyncio.Lock() if isinstance(assets, Assets): assets", "pm[\"BTC\"][\"EUR\"] = Decimal(4.321) assert pm[\"EUR\"][\"BTC\"] == Decimal(1.234) assert pm[\"BTC\"][\"EUR\"] ==", "= nx.from_pandas_adjacency(self.df, create_using=nx.DiGraph) # from bokeh.io import output_file, show #", "{} # Relax edges |V - 1| times for i", "complete... path = (*path, min_pred[path[-1]]) # First cycle retrieved is", "n = len(pmatrix_neglog) min_dist = {source: 0} min_pred = {}", "retrieve assets for filtering calls params, only once. self.assets =", "actual pair pair = properpairs[p] fee = pair.fees[0].get('fee') # TODO", "# # graph = from_networkx(G, nx.spring_layout, scale=2, center=(0, 0)) #", "these we can extract market making opportunities ?? # Another", "fee_pct) /100) # bid price to get: quote_curr -- (buy_price", "only one we are interested in return path[path.index(path[-1]):] return ()", "in proper_userpairs.values()) proper_related_assets = Assets({a.restname: a for a in related_assets})", "as numpy floats for faster compute self.df[quote][base] = bid_price *", "(?) to be the minimal one -> the only one", "def neglog(self): if not self.assets: return False newpm = PriceMatrix(assets=[self.assets[c]", "weight sum (cycle need to be more than one node)", "reversed(negcycle[:-1]): amnt = amnt * pmtx[pred][cn] pred = cn dscr", "print(f\"wss ==> tick: {upd}\") # update pricematrix base = upd.pairname.base", "def __getitem__(self, item): if item not in self.df.columns: raise KeyError(f\"{item}", "# tools=\"\", toolbar_location=None) # # graph = from_networkx(G, nx.spring_layout, scale=2,", "# If we can still relax edges, then we have", "min_pred = {} # Relax edges |V - 1| times", "# plot = figure(title=\"Networkx Integration Demonstration\", x_range=(-1.1, 1.1), y_range=(-1.1, 1.1),", "n, m in zip(cycle, cycle[1:])) < 0: # print(f\"Found one:", "related_assets = set(assets[p.base] for p in proper_userpairs.values()) | set(assets[p.quote] for", "# bid price to get: quote_curr -- (buy_price - fee)", "no duplicates, cycle is not complete... path = (*path, min_pred[path[-1]])", "volume ! 
await pmatrix(base=pair.base, quote=pair.quote, ask_price=tk.ask.price, bid_price=tk.bid.price, fee_pct=fee) # TODO", "websockets with potential arbitrage (including fees - detailed data &", "pmatrix(base=base, quote=quote, ask_price=upd.ask.price, bid_price=upd.bid.price, fee_pct=fee) class PriceMatrix: # Note This", "== Decimal(4.321) async def arbiter(user_assets): assets = await client.retrieve_assets() proper_userassets", "fix this : is it on row, or columns ?", "background bgtsk = asyncio.create_task(ticker_updates(pairs=proper_userpairs, pmatrix=pmtx)) try: # observe pricematrix changes", "pmtx.neglog() if neglog: negcycle = bellmanford(neglog) if len(negcycle): amnt =", "are interested in return path[path.index(path[-1]):] return () if __name__ ==", "CYCLE FOUND !\") # # # Now find it #", "p in proper_userpairs.values()) proper_related_assets = Assets({a.restname: a for a in", "market making opportunities ?? # Another way : # negloggraph", "# negcycle = list() # # if nx.negative_edge_cycle(negloggraph): # #", "First cycle retrieved is *likely* (?) to be the minimal", "Asset): quote = self.assets[quote].restname # These are done with decimal,", "# from bokeh.plotting import figure, from_networkx # # plot =", "is not None and ( p.base in proper_userassets or p.quote", "bellmanford(pmatrix_neglog: PriceMatrix, source='ZEUR'): n = len(pmatrix_neglog) min_dist = {source: 0}", "# # Now find it # print(f\"computing cycles... {datetime.now()}\") #", "import networkx as nx client = RestClient() async def ticker_updates(pairs:", "fee_pct=fee) class PriceMatrix: # Note This matrix is square #", "print(f\"computing cycles DONE ! {datetime.now()}\") await asyncio.sleep(5) finally: # in", "RestClient from aiokraken.websockets.publicapi import ticker import networkx as nx client", "negcycle = list() # # if nx.negative_edge_cycle(negloggraph): # # find", "related assets related_assets = set(assets[p.base] for p in proper_userpairs.values()) |", "neglog(self): if not self.assets: return False newpm = PriceMatrix(assets=[self.assets[c] for", "ask_price # ask price to get: base_curr -- (sell_price -", "if not self.assets: return False newpm = PriceMatrix(assets=[self.assets[c] for c", "G def test_pricematrix_mapping(): # testing with string for simplicity for", "nx client = RestClient() async def ticker_updates(pairs: typing.Union[AssetPairs, typing.Iterable[AssetPair]], pmatrix):", "- 1| times for i in range(n - 1): #", "import datetime from decimal import Decimal from math import log", "assert pm[\"EUR\"][\"BTC\"] == Decimal(1.234) assert pm[\"BTC\"][\"EUR\"] == Decimal(4.321) async def", "in reversed(negcycle[:-1]): amnt = amnt * pmtx[pred][cn] pred = cn", "newpm.df[c] = np.negative(np.log(self.df[c])) return newpm def to_graph(self): G = nx.from_pandas_adjacency(self.df,", "fee) --> base_curr self.df[base][quote] = ((100 - fee_pct)/100) / ask_price", "This matrix is square # since we want to do", "required pairs, get ticket updates if isinstance(pairs, AssetPairs): # TODO", "extract market making opportunities ?? 
"""
Bellman-Ford arbitrage implementation over the websocket API.
"""
from __future__ import annotations

import asyncio
import typing
from collections import namedtuple
from datetime import datetime
from decimal import Decimal
from math import log

import networkx as nx
import numpy as np
import pandas as pd

from aiokraken.model.asset import Asset
from aiokraken.model.assetpair import AssetPair
from aiokraken.rest import AssetPairs, Assets
from aiokraken.rest.client import RestClient
from aiokraken.websockets.publicapi import ticker

client = RestClient()


async def ticker_updates(pairs: typing.Union[AssetPairs, typing.Iterable[AssetPair]], pmatrix):
    # For the required pairs, get ticker updates.
    if isinstance(pairs, AssetPairs):  # TODO: unify the iterables of pairs somehow...
        properpairs = pairs
        pairs = [p for p in pairs.values()]
    else:
        properpairs = AssetPairs({p.wsname: p for p in pairs})

    tkrs = await client.ticker(pairs=[p for p in pairs])
    # TODO: build the price matrix.
    for p, tk in tkrs.items():
        # retrieve the actual pair
        pair = properpairs[p]
        fee = pair.fees[0].get('fee')  # TODO: pick the right fee depending on total traded volume!
        await pmatrix(base=pair.base, quote=pair.quote,
                      ask_price=tk.ask.price, bid_price=tk.bid.price, fee_pct=fee)

    # TODO: two levels:
    #  - slow updates with a wide list of pairs and potential interest (no fees -> small data, quick compute)
    #  - websockets with potential arbitrage (including fees -> detailed data, precise compute)
    async for upd in ticker(pairs=pairs, restclient=client):
        print(f"wss ==> tick: {upd}")
        # update the price matrix
        base = upd.pairname.base
        quote = upd.pairname.quote
        fee = properpairs[upd.pairname].fees[0].get('fee')
        await pmatrix(base=base, quote=quote,
                      ask_price=upd.ask.price, bid_price=upd.bid.price, fee_pct=fee)
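# Editor's note (worked micro-example, not in the original code): with a
# hypothetical Kraken-style fee of fee_pct = 0.26 (percent), the factor applied
# in PriceMatrix below is (100 - 0.26) / 100 = 0.9974, i.e. each conversion
# keeps 99.74% of the converted amount, so fees simply scale the effective rate.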
class PriceMatrix:
    # Note: this matrix is square, since we want to do arbitrage and find cycles...
    df: pd.DataFrame
    # We also need to be careful that only one writer can modify data at a time...
    wlock: asyncio.Lock

    assets: typing.Optional[Assets]

    def __init__(self, assets: typing.Union[Assets, typing.Iterable[Asset]]):
        self.wlock = asyncio.Lock()

        if isinstance(assets, Assets):
            assets = [a for a in assets.values()]

        self.df = pd.DataFrame(
            data={c.restname: {c.restname: None for c in assets} for c in assets},
            columns=[c.restname for c in assets],
            dtype='float64',
        )
        self.assets = None

    async def __call__(self, base: Asset, ask_price: Decimal, quote: Asset, bid_price: Decimal, fee_pct: Decimal):
        if self.assets is None:
            # retrieve assets for filtering call params, only once
            self.assets = await client.retrieve_assets()

        async with self.wlock:  # careful with concurrency control
            if not isinstance(base, Asset):
                base = self.assets[base].restname
            if not isinstance(quote, Asset):
                quote = self.assets[quote].restname
            # These are computed with Decimal, but stored as numpy floats for faster compute.
            self.df[quote][base] = bid_price * ((100 - fee_pct) / 100)  # bid price to get: quote_curr -- (buy_price - fee) --> base_curr
            self.df[base][quote] = ((100 - fee_pct) / 100) / ask_price  # ask price to get: base_curr -- (sell_price - fee) --> quote_curr

    def __getitem__(self, item):
        if item not in self.df.columns:
            raise KeyError(f"{item} not found")
        if item not in self.df:
            return pd.Series(dtype=pd.dtype('decimal'))
        return self.df[item]

    def __len__(self):
        return len(self.df.columns)

    def __str__(self):
        return self.df.to_string()

    def neglog(self):
        if not self.assets:
            return False
        newpm = PriceMatrix(assets=[self.assets[c] for c in self.df.columns])
        # copy all values and take -log()
        for c in self.df.columns:
            # TODO: fix this: does it apply to rows or columns? which is best??
            newpm.df[c] = np.negative(np.log(self.df[c]))
        return newpm

    def to_graph(self):
        G = nx.from_pandas_adjacency(self.df, create_using=nx.DiGraph)

        # from bokeh.io import output_file, show
        # from bokeh.plotting import figure, from_networkx
        #
        # plot = figure(title="Networkx Integration Demonstration",
        #               x_range=(-1.1, 1.1), y_range=(-1.1, 1.1),
        #               tools="", toolbar_location=None)
        #
        # graph = from_networkx(G, nx.spring_layout, scale=2, center=(0, 0))
        # plot.renderers.append(graph)
        #
        # output_file("networkx_graph.html")
        # show(plot)

        return G
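# Editor's note (sketch, not in the original code): why the -log transform?
# A conversion cycle is profitable exactly when the product of its rates
# exceeds 1, which in -log space becomes a negative cycle:
#   r1 * r2 * r3 > 1  <=>  -log(r1) - log(r2) - log(r3) < 0
# For instance, with hypothetical rates EUR->BTC = 0.5, BTC->XTZ = 3.0 and
# XTZ->EUR = 0.7, the product is 0.5 * 3.0 * 0.7 = 1.05 > 1, and the -log
# weights sum to about -0.0488 < 0, so Bellman-Ford can flag the cycle.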
def test_pricematrix_mapping():
    # testing with strings for simplicity, for now
    pm = PriceMatrix(["EUR", "BTC"])

    pm["EUR"]["BTC"] = Decimal(1.234)
    pm["BTC"]["EUR"] = Decimal(4.321)

    assert pm["EUR"]["BTC"] == Decimal(1.234)
    assert pm["BTC"]["EUR"] == Decimal(4.321)


async def arbiter(user_assets):
    assets = await client.retrieve_assets()
    proper_userassets = Assets(assets_as_dict={assets[a].restname: assets[a] for a in user_assets})

    assetpairs = await client.retrieve_assetpairs()
    proper_userpairs = AssetPairs(assetpairs_as_dict={
        p.wsname: p for p in assetpairs.values()
        if p.wsname is not None and (p.base in proper_userassets or p.quote in proper_userassets)
    })

    # retrieving the widely related assets
    related_assets = set(assets[p.base] for p in proper_userpairs.values()) | set(
        assets[p.quote] for p in proper_userpairs.values()
    )
    proper_related_assets = Assets({a.restname: a for a in related_assets})

    pmtx = PriceMatrix(assets=proper_related_assets)

    # run ticker updates in the background
    bgtsk = asyncio.create_task(ticker_updates(pairs=proper_userpairs, pmatrix=pmtx))

    try:
        # observe price matrix changes
        while True:
            # TODO: an efficient TUI lib!
            # print(pmtx)
            # pricegraph = pmtx.to_graph()  # display...
            neglog = pmtx.neglog()
            if neglog:
                negcycle = bellmanford(neglog)
                if len(negcycle):
                    amnt = 1  # arbitrary starting amount
                    pred = negcycle[-1]
                    dscr = f"{amnt} {pred}"
                    for cn in reversed(negcycle[:-1]):
                        amnt = amnt * pmtx[pred][cn]
                        pred = cn
                        dscr = dscr + f" -> {amnt} {pred}"
                    print(f"ARBITRAGE POSSIBLE: {dscr}")
                    # TODO: from these we can extract market-making opportunities??

                # Another way:
                # negloggraph = neglog.to_graph()
                #
                # negcycle = list()
                #
                # if nx.negative_edge_cycle(negloggraph):
                #     # find it!
                #     print("NEGATIVE CYCLE FOUND!")
                #
                #     # Now find it:
                #     print(f"computing cycles... {datetime.now()}")
                #
                #     for cycle in nx.simple_cycles(negloggraph):
                #     # for cycle in nx.cycle_basis(negloggraph):  # NOT implemented!
                #         # find a negative weight sum (the cycle needs more than one node)
                #         if sum(negloggraph[n][m].get('weight') for n, m in zip(cycle, cycle[1:])) < 0:
                #             print(f"Found one: {cycle}")
                #             negcycle.append(cycle)
                #
                #     print(negcycle)
                #     print(f"computing cycles DONE! {datetime.now()}")

            await asyncio.sleep(5)
    finally:
        # in every case, cancel the background task now
        bgtsk.cancel()

    # TODO: react!
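# Editor's addition (hedged sketch, not part of the original script): the same
# negative-cycle idea expressed directly with networkx, mirroring the
# commented-out alternative inside arbiter() above. The rates are hypothetical.
def _toy_negative_cycle_demo():
    g = nx.DiGraph()
    # A profitable loop: the product of rates is 0.5 * 3.0 * 0.7 = 1.05 > 1.
    for src, dst, rate in [("EUR", "BTC", 0.5), ("BTC", "XTZ", 3.0), ("XTZ", "EUR", 0.7)]:
        g.add_edge(src, dst, weight=-log(rate))
    # A profitable cycle shows up as a negative cycle in -log space.
    assert nx.negative_edge_cycle(g)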
def bellmanford(pmatrix_neglog: PriceMatrix, source='ZEUR'):
    n = len(pmatrix_neglog)
    min_dist = {source: 0}
    min_pred = {}

    # Relax edges |V - 1| times.
    for i in range(n - 1):  # iterations
        for v in pmatrix_neglog.df.columns:  # vertex source
            if v in min_dist.keys():  # otherwise the distance is infinite until we know it...
                for w in pmatrix_neglog.df.columns:  # vertex target
                    if w not in min_dist.keys() or min_dist[w] > min_dist[v] + pmatrix_neglog[v][w]:
                        min_dist[w] = min_dist[v] + pmatrix_neglog[v][w]
                        min_pred[w] = v

    # If we can still relax edges, then we have a negative cycle.
    for v in pmatrix_neglog.df.columns:
        if v in min_dist.keys():  # otherwise the node is not yet relevant here
            for w in pmatrix_neglog.df.columns:
                if min_dist[w] > min_dist[v] + pmatrix_neglog[v][w]:
                    # print(f"{min_dist[w]} > {min_dist[v]} + {pmatrix_neglog[v][w]}")
                    path = (w, min_pred[w])
                    while len(set(path)) == len(path):  # while there are no duplicates, the cycle is not complete...
                        path = (*path, min_pred[path[-1]])
                    # The first cycle retrieved is *likely* (?) the minimal one -> the only one we are interested in.
                    return path[path.index(path[-1]):]

    return ()
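# Editor's note (not in the original code): because the edges are implicit in
# the dense price matrix, each relaxation pass above costs O(V^2) and the whole
# routine O(V^3). The default source 'ZEUR' assumes Kraken's REST name for EUR;
# a typical call would be bellmanford(pmtx.neglog()) once neglog() stops
# returning False (i.e. once the assets are known).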
# print(\"NEGATIVE CYCLE FOUND !\") # #", "matrix for p, tk in tkrs.items(): # retrieve the actual", "client = RestClient() async def ticker_updates(pairs: typing.Union[AssetPairs, typing.Iterable[AssetPair]], pmatrix): #", "pairs]) # TODO : build price matrix for p, tk", "min_dist.keys(): # otherwise node is not yet relevant here for", "TODO : 2 levels : # - slow updates with", "p in pairs}) tkrs = await client.ticker(pairs=[p for p in", "Decimal(4.321) assert pm[\"EUR\"][\"BTC\"] == Decimal(1.234) assert pm[\"BTC\"][\"EUR\"] == Decimal(4.321) async", "# retrieve assets for filtering calls params, only once. self.assets", "# output_file(\"networkx_graph.html\") # show(plot) return G def test_pricematrix_mapping(): # testing", "= await client.retrieve_assets() proper_userassets = Assets(assets_as_dict={assets[a].restname: assets[a] for a in", "typing.Union[Assets, typing.Iterable[Asset]]): self.wlock = asyncio.Lock() if isinstance(assets, Assets): assets =", "from decimal import Decimal from math import log import pandas", "pandas as pd import numpy as np import asyncio import", "price to get: base_curr -- (sell_price - fee) --> quote_curr", "= AssetPairs({p.wsname: p for p in pairs}) tkrs = await", "# TODO : fix this : is it on row,", "# Another way : # negloggraph = neglog.to_graph() # #", "# # negcycle = list() # # if nx.negative_edge_cycle(negloggraph): #", "can still relax edges, then we have a negative cycle", "in min_dist.keys(): # otherwise node is not yet relevant here", "restclient=client): print(f\"wss ==> tick: {upd}\") # update pricematrix base =", "relax edges, then we have a negative cycle for v", "assets related_assets = set(assets[p.base] for p in proper_userpairs.values()) | set(assets[p.quote]", "? which is best ?? newpm.df[c] = np.negative(np.log(self.df[c])) return newpm", "with decimal, but stored as numpy floats for faster compute", "pm[\"EUR\"][\"BTC\"] = Decimal(1.234) pm[\"BTC\"][\"EUR\"] = Decimal(4.321) assert pm[\"EUR\"][\"BTC\"] == Decimal(1.234)" ]
[ "<<EMAIL>> # Creative Commons BY-NC-SA 4.0 International Public License #", "LICENSE.md or https://creativecommons.org/licenses/by-nc-sa/4.0/) # \"\"\" The Snowtire binary sensor. For", "# Copyright (c) 2020, Andrey \"Limych\" Khrolenok <<EMAIL>> # Creative", "Snowtire binary sensor. For more details about this platform, please", "this platform, please refer to the documentation at https://github.com/Limych/ha-snowtire/ \"\"\"", "International Public License # (see LICENSE.md or https://creativecommons.org/licenses/by-nc-sa/4.0/) # \"\"\"", "Copyright (c) 2020, Andrey \"Limych\" Khrolenok <<EMAIL>> # Creative Commons", "# (see LICENSE.md or https://creativecommons.org/licenses/by-nc-sa/4.0/) # \"\"\" The Snowtire binary", "\"Limych\" Khrolenok <<EMAIL>> # Creative Commons BY-NC-SA 4.0 International Public", "License # (see LICENSE.md or https://creativecommons.org/licenses/by-nc-sa/4.0/) # \"\"\" The Snowtire", "or https://creativecommons.org/licenses/by-nc-sa/4.0/) # \"\"\" The Snowtire binary sensor. For more", "(c) 2020, Andrey \"Limych\" Khrolenok <<EMAIL>> # Creative Commons BY-NC-SA", "\"\"\" The Snowtire binary sensor. For more details about this", "details about this platform, please refer to the documentation at", "about this platform, please refer to the documentation at https://github.com/Limych/ha-snowtire/", "Commons BY-NC-SA 4.0 International Public License # (see LICENSE.md or", "2020, Andrey \"Limych\" Khrolenok <<EMAIL>> # Creative Commons BY-NC-SA 4.0", "For more details about this platform, please refer to the", "sensor. For more details about this platform, please refer to", "Khrolenok <<EMAIL>> # Creative Commons BY-NC-SA 4.0 International Public License", "https://creativecommons.org/licenses/by-nc-sa/4.0/) # \"\"\" The Snowtire binary sensor. For more details", "# Creative Commons BY-NC-SA 4.0 International Public License # (see", "# # Copyright (c) 2020, Andrey \"Limych\" Khrolenok <<EMAIL>> #", "binary sensor. For more details about this platform, please refer", "Andrey \"Limych\" Khrolenok <<EMAIL>> # Creative Commons BY-NC-SA 4.0 International", "The Snowtire binary sensor. For more details about this platform,", "(see LICENSE.md or https://creativecommons.org/licenses/by-nc-sa/4.0/) # \"\"\" The Snowtire binary sensor.", "BY-NC-SA 4.0 International Public License # (see LICENSE.md or https://creativecommons.org/licenses/by-nc-sa/4.0/)", "more details about this platform, please refer to the documentation", "# \"\"\" The Snowtire binary sensor. For more details about", "4.0 International Public License # (see LICENSE.md or https://creativecommons.org/licenses/by-nc-sa/4.0/) #", "Public License # (see LICENSE.md or https://creativecommons.org/licenses/by-nc-sa/4.0/) # \"\"\" The", "Creative Commons BY-NC-SA 4.0 International Public License # (see LICENSE.md" ]
[ "[-1.2, -1.8, -1.5], [ nan, 0.3, 0.5], [ nan, -1.3,", "y_hat = model.predict_proba(X, n_jobs=2) y = [[ 9.85225968e-01, 1.47740317e-02], [", "def test_bc_multivariate_gaussian_nan_predict(): y_hat = model.predict(X_nan) y = [0, 1, 1,", "import with_setup from nose.tools import assert_almost_equal from nose.tools import assert_equal", "-1.02510436e-01], [ -3.06641866e-03, -5.78877778e+00], [ -9.85292840e-02, -2.36626165e+00], [ -2.61764180e-01, -1.46833995e+00],", "-1.77183117e+00], [ -5.58542088e-01, -8.48731256e-01], [ -7.67315597e-01, -6.24101927e-01], [ -2.32860808e+00, -1.02510436e-01],", "= numpy.array([[ 0.3, 0.5, 0.1], [ 0.8, 1.4, 0.5], [", "2.825, 2.625] cov2_t = [[0.75687500, 0.23687499, 0.47937500], [0.23687499, 0.40187499, 0.53187499],", "setup_hmm(): global model global hmm1 global hmm2 global hmm3 rigged", "MultivariateGaussianDistribution(mu, cov) mu, cov = [2, 2, 2], numpy.eye(3) d2", "[ -6.11066022e-04, -7.40061107e+00]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict_log_proba_parallel(): y_hat", "model.predict_log_proba(X) y = [[ -5.03107596e-01, -9.27980626e-01], [ -1.86355320e-01, -1.77183117e+00], [", "9.99910190e-01], [ 9.99370443e-01, 6.29556825e-04], [ 2.68992964e-01, 7.31007036e-01], [ 7.69692511e-01, 2.30307489e-01],", "= model.predict_log_proba(X) y_hat2 = model.predict_log_proba(X2) y_hat3 = model.predict_log_proba(X3) assert_array_almost_equal(y_hat1, y_hat2)", "probs = model.predict_proba(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')])) assert_almost_equal(probs[0][0], 0.41025641025641024) assert_almost_equal(probs[0][1],", "y = [[ -5.03107596e-01, -9.27980626e-01], [ -1.86355320e-01, -1.77183117e+00], [ -5.58542088e-01,", "model.predict_proba(X) y = [[ 9.85225968e-01, 1.47740317e-02], [ 6.45656306e-01, 3.54343694e-01], [", "-7.37049444e+00], [ -1.31307006e+00, -3.13332194e-01], [ -2.61764180e-01, -1.46833995e+00], [ -2.29725479e-01, -1.58353505e+00],", "= MultivariateGaussianDistribution bc1 = BayesClassifier.from_samples(d, X=X, y=y, weights=weights) bc2 =", "[ 2.6, 3.6, 3.3], [ 3.1, 2.2, 1.7], [ 1.8,", "0, 1, 1, 1, 0, 0, 0] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed,", "DataFrameGenerator(pandas.DataFrame(X)) y_hat1 = model.predict(X) y_hat2 = model.predict(X2) y_hat3 = model.predict(X3)", "assert_almost_equal(logs[1][0], -0.93570553121744293) assert_almost_equal(logs[1][1], -1.429425687080494) assert_almost_equal(logs[1][2], -0.9990078376167526) assert_almost_equal(logs[2][0], -3.9007882563128864) assert_almost_equal(logs[2][1], -0.23562532881626597)", "teardown) def test_bc_multivariate_mixed_nan_predict(): y_hat = model.predict(X_nan) y = [0, 1,", "nan = numpy.nan def setup_multivariate_gaussian(): mu, cov = [0, 0,", "nan, 3.6, 3.3], [ 3.1, nan, 1.7], [ nan, nan,", "= from_json(model.to_json()) assert_true(isinstance(model2, BayesClassifier)) assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution)) assert_array_almost_equal(model.weights, model2.weights)", "d23 = PoissonDistribution(3) d2 = IndependentComponentsDistribution([d21, d22, d23]) global model", "assert_almost_equal(logs[2][1], -0.23562532881626597) assert_almost_equal(logs[2][2], -1.6623251045711958) assert_almost_equal(logs[3][0], -3.1703366478831185) assert_almost_equal(logs[3][1], -0.49261403211260379) assert_almost_equal(logs[3][2], 
-1.058478108940049)", "-0.89097292388986515) assert_almost_equal(logs[0][1], -1.3609765531356006) assert_almost_equal(logs[0][2], -1.0986122886681096) assert_almost_equal(logs[1][0], -0.93570553121744293) assert_almost_equal(logs[1][1], -1.429425687080494) assert_almost_equal(logs[1][2],", "@with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_fit_parallel(): model.fit(X, y, n_jobs=2) mu1 = model.distributions[0].parameters[0]", "hmm3.add_transition(rigged, rigged, 0.5) hmm3.add_transition(rigged, unrigged, 0.5) hmm3.add_transition(unrigged, rigged, 0.5) hmm3.add_transition(unrigged,", "test_hmm_log_proba(): logs = model.predict_log_proba(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')])) assert_almost_equal(logs[0][0], -0.89097292388986515)", "= MultivariateGaussianDistribution(mu2, cov) bc1 = BayesClassifier([d1, d2]) bc1.fit(X, y, weights)", "[ 3.1, 2.2, 1.7], [ 1.8, 2.2, 1.8], [ 1.2,", "* from pomegranate.io import DataGenerator from pomegranate.io import DataFrameGenerator from", "numpy.nan def setup_multivariate_gaussian(): mu, cov = [0, 0, 0], numpy.eye(3)", "test_bc_multivariate_mixed_predict_log_proba(): y_hat = model.predict_log_proba(X) y = [[ -5.03107596e-01, -9.27980626e-01], [", "0.4141666, 0.439166]] d21 = model.distributions[1].distributions[0] d22 = model.distributions[1].distributions[1] d23 =", "MultivariateGaussianDistribution)) assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution)) assert_array_almost_equal(model.weights, model2.weights) @with_setup(setup_hmm, teardown) def test_model(): assert_almost_equal(hmm1.log_probability(list('H')),", "unrigged, 0.5) hmm3.add_transition(hmm3.start, rigged, 0.5) hmm3.add_transition(rigged, rigged, 0.5) hmm3.add_transition(rigged, unrigged,", "BayesClassifier)) assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution)) assert_array_almost_equal(model.weights, model2.weights) @with_setup(setup_hmm, teardown) def", "0.6227777777], [0.927222222, 2.251388888, 1.340277777], [0.622777777, 1.340277777, 0.9547222222]] mu2 = model.distributions[1].parameters[0]", "model.fit(X, y, n_jobs=2) mu1 = model.distributions[0].parameters[0] cov1 = model.distributions[0].parameters[1] mu1_t", "IndependentComponentsDistribution)) assert_array_almost_equal(model.weights, model2.weights) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_robust_from_json(): model2 = from_json(model.to_json())", "1, 0, 0, 1] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict_parallel():", "assert_array_almost_equal(d23.parameters, [2.625]) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_from_samples(): model = BayesClassifier.from_samples(MultivariateGaussianDistribution, X,", "assert_array_almost_equal(d22.parameters, [1.01294275, 0.22658346]) assert_array_almost_equal(d23.parameters, [2.625]) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_from_samples(): model", "0, 1, 1, 0, 0, 0, 0, 0] assert_array_almost_equal(y, y_hat)", "mu1_t) assert_array_almost_equal(cov1, cov1_t) assert_array_almost_equal(d21.parameters, [0.34188034]) assert_array_almost_equal(d22.parameters, [1.01294275, 0.22658346]) assert_array_almost_equal(d23.parameters, [2.625])", "test_io_predict_log_proba(): X2 = 
DataGenerator(X) X3 = DataFrameGenerator(pandas.DataFrame(X)) y_hat1 = model.predict_log_proba(X)", "assert_array_almost_equal(model.weights, model2.weights) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_to_json(): model2 = BayesClassifier.from_json(model.to_json()) assert_true(isinstance(model2,", "bc1 = BayesClassifier.from_samples(d, X=X, y=y, weights=weights) bc2 = BayesClassifier.from_samples(d, X=data_generator)", "0, 0, 0] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict_parallel(): y_hat", "-9.80005545e+00, -5.54500620e-05], [ -5.60369104e+00, -3.69104343e-03], [ -1.78390074e+00, -1.83900741e-01], [ -3.05902274e-07,", "0.86297361, 0.13702639]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_nan_predict_proba(): y_hat =", "1) assert_equal(predicts[4], 2) @with_setup(setup_multivariate_gaussian, teardown) def test_io_log_probability(): X2 = DataGenerator(X)", "model.distributions[1].parameters[1] mu2_t = [2.925, 2.825, 2.625] cov2_t = [[0.75687499, 0.23687499,", "2.825, 2.625] cov2_t = [[0.75687499, 0.23687499, 0.4793750], [0.23687499, 0.40187499, 0.5318749],", "assert_almost_equal(hmm1.log_probability(list('H')), -0.2231435513142097 ) assert_almost_equal(hmm1.log_probability(list('T')), -1.6094379124341003 ) assert_almost_equal(hmm1.log_probability(list('HHHH')), -0.8925742052568388 ) assert_almost_equal(hmm1.log_probability(list('THHH')),", "'H': 0.5, 'T':0.5 }) ) hmm1 = HiddenMarkovModel() hmm1.start =", "assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution)) assert_array_almost_equal(model.weights, model2.weights) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_pickle(): model2 =", "assert_array_almost_equal(mu1, mu1_t) assert_array_almost_equal(cov1, cov1_t) assert_array_almost_equal(mu2, mu2_t) assert_array_almost_equal(cov2, cov2_t) @with_setup(setup_multivariate_mixed, teardown)", "= model.predict_proba(X_nan) y = [[ 9.60834277e-01, 3.91657228e-02], [ 3.10025519e-01, 6.89974481e-01],", "-7.645551826734343) assert_equal(model.d, 1) @with_setup(setup_hmm, teardown) def test_hmm_log_proba(): logs = model.predict_log_proba(np.array([list('H'),", "0.48263438265925512) @with_setup(setup_hmm, teardown) def test_hmm_prediction(): predicts = model.predict(np.array([list('H'), list('THHH'), list('TTTT'),", "= PoissonDistribution(3) d2 = IndependentComponentsDistribution([d21, d22, d23]) global model model", "logp2) assert_array_almost_equal(logp1, logp3) @with_setup(setup_multivariate_gaussian, teardown) def test_io_predict(): X2 = DataGenerator(X)", "0, 0, 0] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_nan_predict(): y_hat", "model.predict(X, n_jobs=2) y = [0, 0, 1, 1, 1, 1,", "logp3) @with_setup(setup_multivariate_gaussian, teardown) def test_io_predict(): X2 = DataGenerator(X) X3 =", "assert_almost_equal(hmm1.log_probability(list('HHHH')), -0.8925742052568388 ) assert_almost_equal(hmm1.log_probability(list('THHH')), -2.2788685663767296 ) assert_almost_equal(hmm1.log_probability(list('TTTT')), -6.437751649736401 ) assert_almost_equal(hmm2.log_probability(list('H')),", "0.22658346]) assert_array_almost_equal(d23.parameters, [2.625]) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_from_samples(): model = 
BayesClassifier.from_samples(MultivariateGaussianDistribution,", "teardown) def test_bc_multivariate_gaussian_predict_parallel(): y_hat = model.predict(X, n_jobs=2) y = [0,", "teardown) def test_bc_multivariate_gaussian_initialization(): assert_equal(model.d, 3) assert_equal(model.n, 2) assert_equal(model.is_vl_, False) @with_setup(setup_multivariate_mixed,", "model.predict_proba(X) y_hat2 = model.predict_proba(X2) y_hat3 = model.predict_proba(X3) assert_array_almost_equal(y_hat1, y_hat2) assert_array_almost_equal(y_hat1,", "= model.predict_proba(X3) assert_array_almost_equal(y_hat1, y_hat2) assert_array_almost_equal(y_hat1, y_hat3) @with_setup(setup_multivariate_gaussian, teardown) def test_io_predict_log_proba():", "0, 0, 0] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_fit_parallel(): model.fit(X,", "[ -2.29725479e-01, -1.58353505e+00], [ -1.17299253e+00, -3.70251760e-01]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown)", "assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_nan_predict_proba(): y_hat = model.predict_proba(X_nan) y", "assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict_log_proba_parallel(): y_hat = model.predict_log_proba(X, n_jobs=2)", "[ -2.61764180e-01, -1.46833995e+00], [ -2.01640009e-03, -6.20744952e+00], [ -1.47371167e-01, -1.98758175e+00]] assert_array_almost_equal(y,", "0.5) hmm3.add_transition(unrigged, rigged, 0.5) hmm3.add_transition(unrigged, unrigged, 0.5) hmm3.bake() model =", "-5.78877778e+00], [ -9.85292840e-02, -2.36626165e+00], [ -2.61764180e-01, -1.46833995e+00], [ -2.01640009e-03, -6.20744952e+00],", "nan, 1.8], [ 1.2, 1.8, 1.5], [ nan, 0.3, 0.5],", "3) assert_equal(model.n, 2) assert_equal(model.is_vl_, False) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict_log_proba(): y_hat", "def test_bc_multivariate_mixed_nan_predict(): y_hat = model.predict(X_nan) y = [0, 1, 0,", "[2.925, 2.825, 2.625] cov2_t = [[0.75687499, 0.23687499, 0.4793750], [0.23687499, 0.40187499,", "3.1, 2.2, 1.7], [ 1.8, 2.2, 1.8], [ 1.2, 1.8,", "d1 = MultivariateGaussianDistribution(mu1, cov) d2 = MultivariateGaussianDistribution(mu2, cov) bc1 =", "7.69692511e-01, 2.30307489e-01], [ 7.94751748e-01, 2.05248252e-01], [ 3.09439547e-01, 6.90560453e-01]] assert_array_almost_equal(y, y_hat)", "from nose.tools import assert_not_equal from nose.tools import assert_less_equal from nose.tools", "model.predict_log_proba(X_nan) y = [[ -3.99533332e-02, -3.23995333e+00], [ -1.17110067e+00, -3.71100666e-01], [", "pass def teardown(): pass @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_initialization(): assert_equal(model.d, 3)", "nose.tools import assert_true from numpy.testing import assert_array_almost_equal import pandas import", "assert_almost_equal(probs[0][0], 0.41025641025641024) assert_almost_equal(probs[0][1], 0.25641025641025639) assert_almost_equal(probs[0][2], 0.33333333333333331) assert_almost_equal(probs[1][0], 0.39230898163446098) assert_almost_equal(probs[1][1], 0.23944639992337707)", "3.35350130e-04], [ 9.99389121e-01, 6.10879359e-04]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict_proba():", "n_jobs=2) mu1 = model.distributions[0].parameters[0] cov1 = 
model.distributions[0].parameters[1] mu1_t = [1.033333,", "cov = [0, 0, 0], numpy.eye(3) d1 = MultivariateGaussianDistribution(mu, cov)", "}) ) unrigged = State( DiscreteDistribution({ 'H': 0.5, 'T':0.5 })", "model.predict(X_nan) y = [0, 1, 1, 0, 1, 1, 1,", "-3.99533332e-02, -3.23995333e+00], [ -1.17110067e+00, -3.71100666e-01], [ -4.01814993e+00, -1.81499279e-02], [ -6.93147181e-01,", "n_jobs=2) y = [[ -1.48842547e-02, -4.21488425e+00], [ -4.37487950e-01, -1.03748795e+00], [", "y) d = MultivariateGaussianDistribution bc1 = BayesClassifier.from_samples(d, X=X, y=y, weights=weights)", "= [[1.3088888, 0.9272222, 0.6227777], [0.9272222, 2.2513888, 1.3402777], [0.6227777, 1.3402777, 0.9547222]]", "test_bc_multivariate_gaussian_to_json(): model2 = BayesClassifier.from_json(model.to_json()) assert_true(isinstance(model2, BayesClassifier)) assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution))", "0.75] cov1_t = [[0.242222, 0.0594444, 0.178333], [0.059444, 0.5980555, 0.414166], [0.178333,", "X2 = DataGenerator(X) X3 = DataFrameGenerator(pandas.DataFrame(X)) logp1 = model.log_probability(X) logp2", "y_hat1 = model.predict(X) y_hat2 = model.predict(X2) y_hat3 = model.predict(X3) assert_array_almost_equal(y_hat1,", "hmm2 global hmm3 rigged = State( DiscreteDistribution({ 'H': 0.8, 'T':", "= [[0.75687500, 0.23687499, 0.47937500], [0.23687499, 0.40187499, 0.53187499], [0.47937500, 0.53187499, 0.78687500]]", "test_bc_multivariate_gaussian_predict_log_proba(): y_hat = model.predict_log_proba(X) y = [[ -1.48842547e-02, -4.21488425e+00], [", "MultivariateGaussianDistribution bc1 = BayesClassifier.from_samples(d, X=X, y=y, weights=weights) bc2 = BayesClassifier.from_samples(d,", "X3 = DataFrameGenerator(pandas.DataFrame(X)) y_hat1 = model.predict(X) y_hat2 = model.predict(X2) y_hat3", "3.6, 3.3], [ 3.1, 2.2, 1.7], [ 1.8, 2.2, 1.8],", "1]) cov = numpy.eye(5) d1 = MultivariateGaussianDistribution(mu1, cov) d2 =", "assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict_proba_parallel(): y_hat = model.predict_proba(X, n_jobs=2)", "0.18969740064786292) assert_almost_equal(probs[3][0], 0.041989459861032523) assert_almost_equal(probs[3][1], 0.61102706038265642) assert_almost_equal(probs[3][2], 0.346983479756311) assert_almost_equal(probs[4][0], 0.27094373022369794) assert_almost_equal(probs[4][1],", "model.predict_proba(X_nan) y = [[ 9.60834277e-01, 3.91657228e-02], [ 3.10025519e-01, 6.89974481e-01], [", "y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict_proba(): y_hat = model.predict_proba(X) y =", "from_json(model.to_json()) assert_true(isinstance(model2, BayesClassifier)) assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution)) assert_array_almost_equal(model.weights, model2.weights) @with_setup(setup_hmm,", "= model.predict(X3) assert_array_almost_equal(y_hat1, y_hat2) assert_array_almost_equal(y_hat1, y_hat3) @with_setup(setup_multivariate_gaussian, teardown) def test_io_predict_proba():", "1.8, 1.5], [ 1.8, 0.3, 0.5], [ 0.7, 1.3, 0.1]])", "d21 = model.distributions[1].distributions[0] d22 = model.distributions[1].distributions[1] d23 = model.distributions[1].distributions[2] assert_array_almost_equal(mu1,", "[2.625]) @with_setup(setup_multivariate_gaussian, teardown) def 
test_bc_multivariate_gaussian_from_samples(): model = BayesClassifier.from_samples(MultivariateGaussianDistribution, X, y)", "[ 3.68423990e-03, 9.96315760e-01], [ 9.99999694e-01, 3.05902227e-07], [ 9.99664650e-01, 3.35350130e-04], [", "MultivariateGaussianDistribution)) assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution)) assert_array_almost_equal(model.weights, model2.weights) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_to_json(): model2", "teardown) def test_bc_multivariate_gaussian_nan_predict(): y_hat = model.predict(X_nan) y = [0, 1,", "9.99999694e-01, 3.05902227e-07], [ 9.99664650e-01, 3.35350130e-04], [ 9.99389121e-01, 6.10879359e-04]] assert_array_almost_equal(y, y_hat)", "3.05902227e-07], [ 9.99664650e-01, 3.35350130e-04], [ 9.99389121e-01, 6.10879359e-04]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed,", "model.predict_log_proba(X3) assert_array_almost_equal(y_hat1, y_hat2) assert_array_almost_equal(y_hat1, y_hat3) def test_io_fit(): X = numpy.random.randn(100,", "teardown) def test_bc_multivariate_gaussian_nan_predict_log_proba(): y_hat = model.predict_log_proba(X_nan) y = [[ -3.99533332e-02,", "[[ 9.60834277e-01, 3.91657228e-02], [ 3.10025519e-01, 6.89974481e-01], [ 1.79862100e-02, 9.82013790e-01], [", "model.predict(X_nan) y = [0, 1, 0, 0, 1, 0, 1,", "X3 = DataFrameGenerator(pandas.DataFrame(X)) y_hat1 = model.predict_log_proba(X) y_hat2 = model.predict_log_proba(X2) y_hat3", "def setup_hmm(): global model global hmm1 global hmm2 global hmm3", "pomegranate import * from pomegranate.io import DataGenerator from pomegranate.io import", "0.42795756], [ 0.46425765, 0.53574235], [ 0.09743127, 0.90256873], [ 0.99693828, 0.00306172],", "def test_bc_multivariate_gaussian_from_samples(): model = BayesClassifier.from_samples(MultivariateGaussianDistribution, X, y) mu1 = model.distributions[0].parameters[0]", "= MultivariateGaussianDistribution(mu, cov) global model model = BayesClassifier([d1, d2]) global", "mu1 = model.distributions[0].parameters[0] cov1 = model.distributions[0].parameters[1] mu1_t = [0.03333333, 0.2833333,", "X=X, y=y, weights=weights) bc2 = BayesClassifier.from_samples(d, X=data_generator) logp1 = bc1.log_probability(X)", "[0, 0, 0, 1, 1, 0, 0, 0, 0, 0]", "test_bc_multivariate_gaussian_predict(): y_hat = model.predict(X) y = [0, 0, 1, 1,", "[ 7.69692511e-01, 2.30307489e-01], [ 7.94751748e-01, 2.05248252e-01], [ 3.09439547e-01, 6.90560453e-01]] assert_array_almost_equal(y,", "model.predict(X) y = [0, 0, 0, 1, 1, 0, 0,", "= numpy.abs(numpy.random.randn(100)) y = numpy.random.randint(2, size=100) data_generator = DataGenerator(X, weights,", "-0.23562532881626597) assert_almost_equal(logs[2][2], -1.6623251045711958) assert_almost_equal(logs[3][0], -3.1703366478831185) assert_almost_equal(logs[3][1], -0.49261403211260379) assert_almost_equal(logs[3][2], -1.058478108940049) assert_almost_equal(logs[4][0],", "assert_array_almost_equal(model.weights, model2.weights) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_robust_from_json(): model2 = from_json(model.to_json()) assert_true(isinstance(model2,", "nose.tools import assert_raises from nose.tools import assert_true from numpy.testing import", "1.7], [ nan, nan, 1.8], [ 1.2, 1.8, 1.5], [", "hmm2, hmm3]) def setup_multivariate(): pass def teardown(): pass @with_setup(setup_multivariate_gaussian, teardown)", "assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, 
teardown) def test_bc_multivariate_gaussian_predict_parallel(): y_hat = model.predict(X, n_jobs=2)", "unrigged = State( DiscreteDistribution({ 'H': 0.5, 'T':0.5 }) ) hmm1", "1.3, 0.1]]) global y y = [0, 0, 0, 1,", "[ -1.47371167e-01, -1.98758175e+00]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_nan_predict_log_proba(): y_hat", "assert_not_equal from nose.tools import assert_less_equal from nose.tools import assert_raises from", "assert_almost_equal(hmm1.log_probability(list('TTTT')), -6.437751649736401 ) assert_almost_equal(hmm2.log_probability(list('H')), -0.6931471805599453 ) assert_almost_equal(hmm2.log_probability(list('T')), -0.6931471805599453 ) assert_almost_equal(hmm2.log_probability(list('HHHH')),", "[[0.75687499, 0.23687499, 0.4793750], [0.23687499, 0.40187499, 0.5318749], [0.47937500, 0.53187499, 0.7868750]] assert_array_almost_equal(mu1,", "[0.34188034]) assert_array_almost_equal(d22.parameters, [1.01294275, 0.22658346]) assert_array_almost_equal(d23.parameters, [2.625]) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_from_samples():", "DiscreteDistribution({ 'H': 0.8, 'T': 0.2 }) ) unrigged = State(", "hmm3.add_transition(hmm3.start, unrigged, 0.5) hmm3.add_transition(hmm3.start, rigged, 0.5) hmm3.add_transition(rigged, rigged, 0.5) hmm3.add_transition(rigged,", "from_json(model.to_json()) assert_true(isinstance(model2, BayesClassifier)) assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution)) assert_array_almost_equal(model.weights, model2.weights) @with_setup(setup_multivariate_mixed,", "n_jobs=2) y = [0, 0, 1, 1, 1, 1, 1,", "def test_bc_multivariate_mixed_nan_predict_proba(): y_hat = model.predict_proba(X_nan) y = [[ 6.99086440e-01, 3.00913560e-01],", "2.68992964e-01, 7.31007036e-01], [ 7.69692511e-01, 2.30307489e-01], [ 7.94751748e-01, 2.05248252e-01], [ 3.09439547e-01,", "0.53187499, 0.78687500]] assert_array_almost_equal(mu1, mu1_t) assert_array_almost_equal(cov1, cov1_t) assert_array_almost_equal(mu2, mu2_t) assert_array_almost_equal(cov2, cov2_t)", "[ -9.31781101e+00, -8.98143220e-05], [ -6.29755079e-04, -7.37049444e+00], [ -1.31307006e+00, -3.13332194e-01], [", "teardown) def test_bc_multivariate_gaussian_fit_parallel(): model.fit(X, y, n_jobs=2) mu1 = model.distributions[0].parameters[0] cov1", "= numpy.random.randn(100, 5) + 0.5 weights = numpy.abs(numpy.random.randn(100)) y =", "list('THTHHHHHTHTH')])) assert_equal(predicts[0], 0) assert_equal(predicts[1], 0) assert_equal(predicts[2], 1) assert_equal(predicts[3], 1) assert_equal(predicts[4],", "y_hat1 = model.predict_proba(X) y_hat2 = model.predict_proba(X2) y_hat3 = model.predict_proba(X3) assert_array_almost_equal(y_hat1,", "0, 0] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_nan_predict(): y_hat =", "+ 0.5 weights = numpy.abs(numpy.random.randn(100)) y = numpy.random.randint(2, size=100) data_generator", "= [0, 1, 1, 0, 1, 1, 1, 0, 0,", "unrigged, 1) hmm2.bake() hmm3 = HiddenMarkovModel() hmm3.add_transition(hmm3.start, unrigged, 0.5) hmm3.add_transition(hmm3.start,", "8.31726965e-02], [ 9.90048198e-01, 9.95180187e-03]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_nan_predict_proba():", "model.predict(np.array([list('H'), list('THHH'), list('TTTT'), 
list('THTHTHTHTHTH'), list('THTHHHHHTHTH')])) assert_equal(predicts[0], 0) assert_equal(predicts[1], 0) assert_equal(predicts[2],", "-6.11066022e-04, -7.40061107e+00]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict_log_proba(): y_hat =", "hmm3]) def setup_multivariate(): pass def teardown(): pass @with_setup(setup_multivariate_gaussian, teardown) def", "[0.622777777, 1.340277777, 0.9547222222]] mu2 = model.distributions[1].parameters[0] cov2 = model.distributions[1].parameters[1] mu2_t", "0.5], [ 0.7, -1.3, -0.1]]) global y y = [0,", "0.3, 0.5], [ nan, 1.3, nan]]) def setup_hmm(): global model", "-7.40061107e+00]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict_log_proba(): y_hat = model.predict_log_proba(X)", "-2.26032685e-06], [ -8.00033541e+00, -3.35406373e-04], [ -5.60369104e+00, -3.69104343e-03], [ -3.05902274e-07, -1.50000003e+01],", "1.79862100e-02, 9.82013790e-01], [ 5.00000000e-01, 5.00000000e-01], [ 5.54485247e-05, 9.99944551e-01], [ 3.68423990e-03,", "[ -3.06641866e-03, -5.78877778e+00], [ -9.85292840e-02, -2.36626165e+00], [ -2.61764180e-01, -1.46833995e+00], [", "[ nan, -1.3, nan]]) def setup_multivariate_mixed(): mu, cov = [0,", "'T': 0.2 }) ) unrigged = State( DiscreteDistribution({ 'H': 0.5,", "hmm1.add_transition(rigged, rigged, 1) hmm1.bake() hmm2 = HiddenMarkovModel() hmm2.start = unrigged", "d2]) global X X = numpy.array([[ 0.3, 0.5, 0.1], [", "y_hat = model.predict(X_nan) y = [0, 1, 0, 0, 1,", "-1.6623251045711958) assert_almost_equal(logs[3][0], -3.1703366478831185) assert_almost_equal(logs[3][1], -0.49261403211260379) assert_almost_equal(logs[3][2], -1.058478108940049) assert_almost_equal(logs[4][0], -1.3058441172130273) assert_almost_equal(logs[4][1],", "assert_almost_equal(hmm1.log_probability(list('T')), -1.6094379124341003 ) assert_almost_equal(hmm1.log_probability(list('HHHH')), -0.8925742052568388 ) assert_almost_equal(hmm1.log_probability(list('THHH')), -2.2788685663767296 ) assert_almost_equal(hmm1.log_probability(list('TTTT')),", "assert_almost_equal(probs[0][1], 0.25641025641025639) assert_almost_equal(probs[0][2], 0.33333333333333331) assert_almost_equal(probs[1][0], 0.39230898163446098) assert_almost_equal(probs[1][1], 0.23944639992337707) assert_almost_equal(probs[1][2], 0.36824461844216183)", "0] global X_nan X_nan = numpy.array([[ 0.3, nan, 0.1], [", "[0, 0, 1, 1, 1, 1, 1, 0, 0, 0]", "assert_less_equal from nose.tools import assert_raises from nose.tools import assert_true from", "global model global hmm1 global hmm2 global hmm3 rigged =", "= model.predict_log_proba(X, n_jobs=2) y = [[ -1.48842547e-02, -4.21488425e+00], [ -4.37487950e-01,", "= HiddenMarkovModel() hmm2.start = unrigged hmm2.add_transition(unrigged, unrigged, 1) hmm2.bake() hmm3", "mu1_t = [1.033333, 1.3166667, 0.75] cov1_t = [[0.242222, 0.0594444, 0.178333],", "cov = numpy.eye(5) d1 = MultivariateGaussianDistribution(mu1, cov) d2 = MultivariateGaussianDistribution(mu2,", "assert_almost_equal(probs[4][1], 0.24642188711704707) assert_almost_equal(probs[4][2], 0.48263438265925512) @with_setup(setup_hmm, teardown) def test_hmm_prediction(): predicts =", "teardown) def test_bc_multivariate_gaussian_robust_from_json(): model2 = from_json(model.to_json()) assert_true(isinstance(model2, BayesClassifier)) assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))", "0.414166], [0.178333, 0.4141666, 0.439166]] d21 = 
model.distributions[1].distributions[0] d22 = model.distributions[1].distributions[1]", "-1.98758175e+00]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict_proba(): y_hat = model.predict_proba(X)", "-6.29755079e-04, -7.37049444e+00], [ -1.31307006e+00, -3.13332194e-01], [ -2.61764180e-01, -1.46833995e+00], [ -2.29725479e-01,", "[1.01294275, 0.22658346]) assert_array_almost_equal(d23.parameters, [2.625]) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_from_samples(): model =", "nan], [ 1.4, 2.6, nan], [ nan, nan, nan], [", "mu1 = model.distributions[0].parameters[0] cov1 = model.distributions[0].parameters[1] mu1_t = [1.033333, 1.3166667,", "@with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict_proba(): y_hat = model.predict_proba(X) y = [[", "= ExponentialDistribution(5) d22 = LogNormalDistribution(0.2, 0.8) d23 = PoissonDistribution(3) d2", "3.3], [ 3.1, 2.2, 1.7], [ 1.8, 2.2, 1.8], [", "y=y, weights=weights) bc2 = BayesClassifier.from_samples(d, X=data_generator) logp1 = bc1.log_probability(X) logp2", "[ nan, nan, 1.8], [-1.2, -1.8, -1.5], [ nan, 0.3,", "y = [0, 1, 1, 0, 1, 1, 1, 0,", "def test_hmm_proba(): probs = model.predict_proba(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')])) assert_almost_equal(probs[0][0],", "hmm1 = HiddenMarkovModel() hmm1.start = rigged hmm1.add_transition(rigged, rigged, 1) hmm1.bake()", "MultivariateGaussianDistribution(mu1, cov) d2 = MultivariateGaussianDistribution(mu2, cov) bc1 = BayesClassifier([d1, d2])", "1, 1, 1, 0, 0, 0] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown)", "2.2, 1.7], [ 1.8, 2.2, 1.8], [-1.2, -1.8, -1.5], [-1.8,", "pickle.loads(pickle.dumps(model)) assert_true(isinstance(model2, BayesClassifier)) assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution)) assert_array_almost_equal(model.weights, model2.weights) @with_setup(setup_multivariate_gaussian,", "teardown) def test_bc_multivariate_gaussian_to_json(): model2 = BayesClassifier.from_json(model.to_json()) assert_true(isinstance(model2, BayesClassifier)) assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))", "numpy.random.randn(100, 5) + 0.5 weights = numpy.abs(numpy.random.randn(100)) y = numpy.random.randint(2,", "@with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_nan_predict_proba(): y_hat = model.predict_proba(X_nan) y = [[", "import assert_true from numpy.testing import assert_array_almost_equal import pandas import random", "assert_array_almost_equal(d21.parameters, [0.34188034]) assert_array_almost_equal(d22.parameters, [1.01294275, 0.22658346]) assert_array_almost_equal(d23.parameters, [2.625]) @with_setup(setup_multivariate_gaussian, teardown) def", "y_hat = model.predict_proba(X) y = [[ 9.85225968e-01, 1.47740317e-02], [ 6.45656306e-01,", "= model.predict_log_proba(X3) assert_array_almost_equal(y_hat1, y_hat2) assert_array_almost_equal(y_hat1, y_hat3) def test_io_fit(): X =", "-0.8925742052568388 ) assert_almost_equal(hmm1.log_probability(list('THHH')), -2.2788685663767296 ) assert_almost_equal(hmm1.log_probability(list('TTTT')), -6.437751649736401 ) assert_almost_equal(hmm2.log_probability(list('H')), -0.6931471805599453", "DataFrameGenerator from nose.tools import with_setup from 
nose.tools import assert_almost_equal from", "def test_bc_multivariate_gaussian_fit_parallel(): model.fit(X, y, n_jobs=2) mu1 = model.distributions[0].parameters[0] cov1 =", "State( DiscreteDistribution({ 'H': 0.8, 'T': 0.2 }) ) unrigged =", "[ nan, 0.3, 0.5], [ nan, -1.3, nan]]) def setup_multivariate_mixed():", "0.3, 0.5], [ 0.7, 1.3, 0.1]]) global y y =", "2) assert_equal(model.is_vl_, False) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_initialization(): assert_equal(model.d, 3) assert_equal(model.n,", "teardown) def test_bc_multivariate_mixed_predict_proba_parallel(): y_hat = model.predict_proba(X, n_jobs=2) y = [[", "X X = numpy.array([[ 0.3, 0.5, 0.1], [ 0.8, 1.4,", "y = [[ 9.85225968e-01, 1.47740317e-02], [ 6.45656306e-01, 3.54343694e-01], [ 3.68423990e-03,", "0]) mu2 = numpy.array([1, 1, 1, 1, 1]) cov =", "= model.distributions[1].distributions[2] assert_array_almost_equal(mu1, mu1_t) assert_array_almost_equal(cov1, cov1_t) assert_array_almost_equal(d21.parameters, [0.34188034]) assert_array_almost_equal(d22.parameters, [1.01294275,", "assert_almost_equal(hmm2.log_probability(list('T')), -0.6931471805599453 ) assert_almost_equal(hmm2.log_probability(list('HHHH')), -2.772588722239781 ) assert_almost_equal(hmm2.log_probability(list('THHH')), -2.772588722239781 ) assert_almost_equal(hmm2.log_probability(list('TTTT')),", "teardown(): pass @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_initialization(): assert_equal(model.d, 3) assert_equal(model.n, 2)", "test_bc_multivariate_mixed_to_json(): model2 = BayesClassifier.from_json(model.to_json()) assert_true(isinstance(model2, BayesClassifier)) assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution))", "@with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_fit_parallel(): model.fit(X, y, n_jobs=2) mu1 = model.distributions[0].parameters[0]", "@with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_nan_predict_proba(): y_hat = model.predict_proba(X_nan) y = [[", "@with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict_parallel(): y_hat = model.predict(X, n_jobs=2) y =", "[ -6.93147181e-01, -6.93147181e-01], [ -9.31781101e+00, -8.98143220e-05], [ -6.29755079e-04, -7.37049444e+00], [", "[ -1.17110067e+00, -3.71100666e-01], [ -4.01814993e+00, -1.81499279e-02], [ -6.93147181e-01, -6.93147181e-01], [", ") assert_almost_equal(hmm2.log_probability(list('HHHH')), -2.772588722239781 ) assert_almost_equal(hmm2.log_probability(list('THHH')), -2.772588722239781 ) assert_almost_equal(hmm2.log_probability(list('TTTT')), -2.772588722239781 )", "= model.distributions[0].parameters[1] mu1_t = [0.03333333, 0.28333333, 0.21666666] cov1_t = [[1.3088888,", "def test_model(): assert_almost_equal(hmm1.log_probability(list('H')), -0.2231435513142097 ) assert_almost_equal(hmm1.log_probability(list('T')), -1.6094379124341003 ) assert_almost_equal(hmm1.log_probability(list('HHHH')), -0.8925742052568388", "9.96315760e-01], [ 1.67981615e-01, 8.32018385e-01], [ 9.99999694e-01, 3.05902227e-07], [ 9.16827304e-01, 8.31726965e-02],", "-1.47371167e-01, -1.98758175e+00]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_nan_predict_log_proba(): y_hat =", "assert_almost_equal(hmm2.log_probability(list('THHH')), -2.772588722239781 ) 
assert_almost_equal(hmm2.log_probability(list('TTTT')), -2.772588722239781 ) assert_almost_equal(hmm3.log_probability(list('H')), -0.43078291609245417) assert_almost_equal(hmm3.log_probability(list('T')), -1.0498221244986776)", "[ 0.82997863, 0.17002137], [ 0.57204244, 0.42795756], [ 0.46425765, 0.53574235], [", "= DataFrameGenerator(pandas.DataFrame(X)) y_hat1 = model.predict_log_proba(X) y_hat2 = model.predict_log_proba(X2) y_hat3 =", "-3.06641866e-03, -5.78877778e+00], [ -9.85292840e-02, -2.36626165e+00], [ -2.61764180e-01, -1.46833995e+00], [ -2.01640009e-03,", "= BayesClassifier([d1, d2]) bc1.fit(X, y, weights) d1 = MultivariateGaussianDistribution(mu1, cov)", "mu2_t = [2.925, 2.825, 2.625] cov2_t = [[0.75687500, 0.23687499, 0.47937500],", "assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict_log_proba_parallel(): y_hat = model.predict_log_proba(X, n_jobs=2)", "1, 0, 1, 0, 0, 1] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown)", "0.3, 0.5, 0.1], [ 0.8, 1.4, 0.5], [ 1.4, 2.6,", "[ 3.68423990e-03, 9.96315760e-01], [ 7.54345778e-08, 9.99999925e-01], [ 2.26032430e-06, 9.99997740e-01], [", "MultivariateGaussianDistribution)) assert_array_almost_equal(model.weights, model2.weights) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_pickle(): model2 = pickle.loads(pickle.dumps(model))", "3.3], [ 3.1, nan, 1.7], [ nan, nan, 1.8], [-1.2,", "model.distributions[0].parameters[1] mu1_t = [1.033333, 1.3166667, 0.75] cov1_t = [[0.242222, 0.0594444,", "0.99693828, 0.00306172], [ 0.90616916, 0.09383084], [ 0.76969251, 0.23030749], [ 0.99798563,", "[[ 6.99086440e-01, 3.00913560e-01], [ 2.98988163e-01, 7.01011837e-01], [ 7.84134838e-01, 2.15865162e-01], [", "teardown) def test_bc_multivariate_gaussian_predict_proba_parallel(): y_hat = model.predict_proba(X, n_jobs=2) y = [[", "import DataGenerator from pomegranate.io import DataFrameGenerator from nose.tools import with_setup", "nan, 0.3, 0.5], [ nan, 1.3, nan]]) def setup_hmm(): global", "y_hat2 = model.predict_log_proba(X2) y_hat3 = model.predict_log_proba(X3) assert_array_almost_equal(y_hat1, y_hat2) assert_array_almost_equal(y_hat1, y_hat3)", "-7.54345812e-08], [ -1.30000023e+01, -2.26032685e-06], [ -8.00033541e+00, -3.35406373e-04], [ -5.60369104e+00, -3.69104343e-03],", "assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution)) assert_array_almost_equal(model.weights, model2.weights) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_to_json(): model2 =", "0.13702639]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict(): y_hat = model.predict(X)", "assert_almost_equal(probs[2][0], 0.020225961918306088) assert_almost_equal(probs[2][1], 0.79007663743383105) assert_almost_equal(probs[2][2], 0.18969740064786292) assert_almost_equal(probs[3][0], 0.041989459861032523) assert_almost_equal(probs[3][1], 0.61102706038265642)", "0.61102706038265642) assert_almost_equal(probs[3][2], 0.346983479756311) assert_almost_equal(probs[4][0], 0.27094373022369794) assert_almost_equal(probs[4][1], 0.24642188711704707) assert_almost_equal(probs[4][2], 0.48263438265925512) @with_setup(setup_hmm,", "[ 0.09743127, 0.90256873], [ 0.99693828, 0.00306172], [ 0.90616916, 0.09383084], [", "global X X = numpy.array([[ 0.3, 0.5, 0.1], [ 0.8,", "= numpy.eye(5) d1 = MultivariateGaussianDistribution(mu1, 
cov) d2 = MultivariateGaussianDistribution(mu2, cov)", "0.5, 0.1], [ 0.8, 1.4, 0.5], [ 1.4, 2.6, 1.8],", "from numpy.testing import assert_array_almost_equal import pandas import random import pickle", "assert_array_almost_equal(y_hat1, y_hat2) assert_array_almost_equal(y_hat1, y_hat3) @with_setup(setup_multivariate_gaussian, teardown) def test_io_predict_proba(): X2 =", "nan]]) def setup_multivariate_mixed(): mu, cov = [0, 0, 0], numpy.eye(3)", "0.28333333, 0.21666666] cov1_t = [[1.3088888, 0.9272222, 0.6227777], [0.9272222, 2.2513888, 1.3402777],", "1.7], [ nan, nan, 1.8], [-1.2, -1.8, -1.5], [ nan,", "1.8, 2.2, 1.8], [ 1.2, 1.8, 1.5], [ 1.8, 0.3,", "def test_io_predict_log_proba(): X2 = DataGenerator(X) X3 = DataFrameGenerator(pandas.DataFrame(X)) y_hat1 =", "assert_almost_equal(hmm3.log_probability(list('TTTT')), -4.1992884979947105) assert_almost_equal(hmm3.log_probability(list('THTHTHTHTHTH')), -8.883630243546788) assert_almost_equal(hmm3.log_probability(list('THTHHHHHTHTH')), -7.645551826734343) assert_equal(model.d, 1) @with_setup(setup_hmm, teardown)", "[0.059444, 0.5980555, 0.414166], [0.178333, 0.4141666, 0.439166]] d21 = model.distributions[1].distributions[0] d22", "[-1.8, 0.3, 0.5], [ 0.7, -1.3, -0.1]]) global y y", "0.46425765, 0.53574235], [ 0.09743127, 0.90256873], [ 0.99693828, 0.00306172], [ 0.90616916,", "model.distributions[0].parameters[1] mu1_t = [0.03333333, 0.2833333, 0.21666666] cov1_t = [[1.308888888, 0.9272222222,", "[ 8.98102888e-05, 9.99910190e-01], [ 9.99370443e-01, 6.29556825e-04], [ 2.68992964e-01, 7.31007036e-01], [", "0.21666666] cov1_t = [[1.308888888, 0.9272222222, 0.6227777777], [0.927222222, 2.251388888, 1.340277777], [0.622777777,", "pickle.loads(pickle.dumps(model)) assert_true(isinstance(model2, BayesClassifier)) assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution)) assert_array_almost_equal(model.weights, model2.weights) @with_setup(setup_multivariate_mixed,", "def test_bc_multivariate_mixed_predict_log_proba(): y_hat = model.predict_log_proba(X) y = [[ -5.03107596e-01, -9.27980626e-01],", "nose.tools import assert_almost_equal from nose.tools import assert_equal from nose.tools import", "test_bc_multivariate_gaussian_nan_predict_log_proba(): y_hat = model.predict_log_proba(X_nan) y = [[ -3.99533332e-02, -3.23995333e+00], [", "0, 1, 0, 0, 1] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def", "y_hat = model.predict_log_proba(X_nan) y = [[ -3.99533332e-02, -3.23995333e+00], [ -1.17110067e+00,", "2.98988163e-01, 7.01011837e-01], [ 7.84134838e-01, 2.15865162e-01], [ 5.00000000e-01, 5.00000000e-01], [ 8.98102888e-05,", "assert_almost_equal(probs[1][1], 0.23944639992337707) assert_almost_equal(probs[1][2], 0.36824461844216183) assert_almost_equal(probs[2][0], 0.020225961918306088) assert_almost_equal(probs[2][1], 0.79007663743383105) assert_almost_equal(probs[2][2], 0.18969740064786292)", "import pandas import random import pickle import numpy as np", "0.9547222]] mu2 = model.distributions[1].parameters[0] cov2 = model.distributions[1].parameters[1] mu2_t = [2.925,", "= model.predict_proba(X_nan) y = [[ 6.99086440e-01, 3.00913560e-01], [ 2.98988163e-01, 7.01011837e-01],", "6.10879359e-04]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict_proba(): y_hat = model.predict_proba(X)", "MultivariateGaussianDistribution(mu, cov) d21 = 
ExponentialDistribution(5) d22 = LogNormalDistribution(0.2, 0.8) d23", "[ -8.68361522e-02, -2.48683615e+00], [ -1.00016521e-02, -4.61000165e+00]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown)", "DataGenerator(X) X3 = DataFrameGenerator(pandas.DataFrame(X)) y_hat1 = model.predict_log_proba(X) y_hat2 = model.predict_log_proba(X2)", "0.8, 1.4, 0.5], [ 1.4, 2.6, 1.8], [ 4.2, 3.3,", "assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution)) assert_array_almost_equal(model.weights, model2.weights) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_to_json(): model2 =", "HiddenMarkovModel() hmm3.add_transition(hmm3.start, unrigged, 0.5) hmm3.add_transition(hmm3.start, rigged, 0.5) hmm3.add_transition(rigged, rigged, 0.5)", "[ 0.46425765, 0.53574235], [ 0.09743127, 0.90256873], [ 0.99693828, 0.00306172], [", "nan, 0.3, 0.5], [ nan, -1.3, nan]]) def setup_multivariate_mixed(): mu,", "assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_nan_predict(): y_hat = model.predict(X_nan) y", "global y y = [0, 0, 0, 1, 1, 1,", "model.distributions[1].distributions[1] d23 = model.distributions[1].distributions[2] assert_array_almost_equal(mu1, mu1_t) assert_array_almost_equal(cov1, cov1_t) assert_array_almost_equal(d21.parameters, [0.34188034])", "9.96315760e-01], [ 9.99999694e-01, 3.05902227e-07], [ 9.99664650e-01, 3.35350130e-04], [ 9.99389121e-01, 6.10879359e-04]]", "model.predict_proba(X_nan) y = [[ 6.99086440e-01, 3.00913560e-01], [ 2.98988163e-01, 7.01011837e-01], [", "= BayesClassifier.from_samples(d, X=data_generator) logp1 = bc1.log_probability(X) logp2 = bc2.log_probability(X) assert_array_almost_equal(logp1,", "numpy as np nan = numpy.nan def setup_multivariate_gaussian(): mu, cov", "teardown) def test_hmm_proba(): probs = model.predict_proba(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')]))", "pass @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_initialization(): assert_equal(model.d, 3) assert_equal(model.n, 2) assert_equal(model.is_vl_,", "y = [[ 9.60834277e-01, 3.91657228e-02], [ 3.10025519e-01, 6.89974481e-01], [ 1.79862100e-02,", "assert_equal(model.is_vl_, False) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict_log_proba(): y_hat = model.predict_log_proba(X) y", "2.05248252e-01], [ 3.09439547e-01, 6.90560453e-01]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict_proba_parallel():", "teardown) def test_bc_multivariate_mixed_nan_predict_log_proba(): y_hat = model.predict_log_proba(X_nan) y = [[ -3.57980882e-01,", "predicts = model.predict(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')])) assert_equal(predicts[0], 0) assert_equal(predicts[1],", "y_hat = model.predict_log_proba(X) y = [[ -5.03107596e-01, -9.27980626e-01], [ -1.86355320e-01,", "= BayesClassifier([hmm1, hmm2, hmm3]) def setup_multivariate(): pass def teardown(): pass", "assert_almost_equal(probs[3][0], 0.041989459861032523) assert_almost_equal(probs[3][1], 0.61102706038265642) assert_almost_equal(probs[3][2], 0.346983479756311) assert_almost_equal(probs[4][0], 0.27094373022369794) assert_almost_equal(probs[4][1], 0.24642188711704707)", "assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def 
test_bc_multivariate_gaussian_fit_parallel(): model.fit(X, y, n_jobs=2) mu1", "= MultivariateGaussianDistribution(mu1, cov) d2 = MultivariateGaussianDistribution(mu2, cov) bc1 = BayesClassifier([d1,", "assert_array_almost_equal(mu2, mu2_t) assert_array_almost_equal(cov2, cov2_t) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_fit_parallel(): model.fit(X, y,", "y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict(): y_hat = model.predict(X) y =", "-1.429425687080494) assert_almost_equal(logs[1][2], -0.9990078376167526) assert_almost_equal(logs[2][0], -3.9007882563128864) assert_almost_equal(logs[2][1], -0.23562532881626597) assert_almost_equal(logs[2][2], -1.6623251045711958) assert_almost_equal(logs[3][0],", "cov2_t) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_pickle(): model2 = pickle.loads(pickle.dumps(model)) assert_true(isinstance(model2, BayesClassifier))", "nan, 1.7], [ nan, nan, 1.8], [ 1.2, 1.8, 1.5],", "assert_almost_equal(probs[3][1], 0.61102706038265642) assert_almost_equal(probs[3][2], 0.346983479756311) assert_almost_equal(probs[4][0], 0.27094373022369794) assert_almost_equal(probs[4][1], 0.24642188711704707) assert_almost_equal(probs[4][2], 0.48263438265925512)", "d2 = MultivariateGaussianDistribution(mu2, cov) bc2 = BayesClassifier([d1, d2]) bc2.fit(data_generator) logp1", "numpy.random.randint(2, size=100) data_generator = DataGenerator(X, weights, y) d = MultivariateGaussianDistribution", "y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict_parallel(): y_hat = model.predict(X, n_jobs=2) y", "model.predict(X) y = [0, 0, 1, 1, 1, 1, 1,", "-2.772588722239781 ) assert_almost_equal(hmm2.log_probability(list('THHH')), -2.772588722239781 ) assert_almost_equal(hmm2.log_probability(list('TTTT')), -2.772588722239781 ) assert_almost_equal(hmm3.log_probability(list('H')), -0.43078291609245417)", "data_generator = DataGenerator(X, weights, y) mu1 = numpy.array([0, 0, 0,", "= rigged hmm1.add_transition(rigged, rigged, 1) hmm1.bake() hmm2 = HiddenMarkovModel() hmm2.start", "cov1_t) assert_array_almost_equal(mu2, mu2_t) assert_array_almost_equal(cov2, cov2_t) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_pickle(): model2", "@with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_nan_predict(): y_hat = model.predict(X_nan) y = [0,", "1.340277777, 0.9547222222]] mu2 = model.distributions[1].parameters[0] cov2 = model.distributions[1].parameters[1] mu2_t =", "-0.7284958836972919) @with_setup(setup_hmm, teardown) def test_hmm_proba(): probs = model.predict_proba(np.array([list('H'), list('THHH'), list('TTTT'),", "[ 0.76969251, 0.23030749], [ 0.99798563, 0.00201437], [ 0.86297361, 0.13702639]] assert_array_almost_equal(y,", "2, 2], numpy.eye(3) d2 = MultivariateGaussianDistribution(mu, cov) global model model", "0, 0, 0] global X_nan X_nan = numpy.array([[ 0.3, nan,", "assert_array_almost_equal(y_hat1, y_hat2) assert_array_almost_equal(y_hat1, y_hat3) @with_setup(setup_multivariate_gaussian, teardown) def test_io_predict_log_proba(): X2 =", "3.10025519e-01, 6.89974481e-01], [ 1.79862100e-02, 9.82013790e-01], [ 5.00000000e-01, 5.00000000e-01], [ 5.54485247e-05,", "-1.20093223e+00], [ -1.20735130e+00, -3.55230506e-01], [ -2.43174286e-01, -1.53310132e+00], [ -6.93147181e-01, -6.93147181e-01],", "[ 0.7, 1.3, 0.1]]) global y y = [0, 0,", "model.predict_proba(X2) y_hat3 = 
model.predict_proba(X3) assert_array_almost_equal(y_hat1, y_hat2) assert_array_almost_equal(y_hat1, y_hat3) @with_setup(setup_multivariate_gaussian, teardown)", "[ 5.00000000e-01, 5.00000000e-01], [ 5.54485247e-05, 9.99944551e-01], [ 3.68423990e-03, 9.96315760e-01], [", "1.3166667, 0.75] cov1_t = [[0.242222, 0.0594444, 0.178333], [0.059444, 0.5980555, 0.414166],", "y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict_log_proba_parallel(): y_hat = model.predict_log_proba(X, n_jobs=2) y", "0.3, nan, 0.1], [ nan, 1.4, nan], [ 1.4, 2.6,", "mu2_t) assert_array_almost_equal(cov2, cov2_t) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_fit_parallel(): model.fit(X, y, n_jobs=2)", "0, 0, 1, 0, 1, 0, 0, 1] assert_array_almost_equal(y, y_hat)", "nose.tools import assert_not_equal from nose.tools import assert_less_equal from nose.tools import", "0.86297361, 0.13702639]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict(): y_hat =", "bc2.fit(data_generator) logp1 = bc1.log_probability(X) logp2 = bc2.log_probability(X) assert_array_almost_equal(logp1, logp2) def", "assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_nan_predict(): y_hat = model.predict(X_nan) y", "n_jobs=2) y = [[ 9.85225968e-01, 1.47740317e-02], [ 6.45656306e-01, 3.54343694e-01], [", "model.predict_proba(X) y = [[ 0.60464873, 0.39535127], [ 0.82997863, 0.17002137], [", "0, 0], numpy.eye(3) d1 = MultivariateGaussianDistribution(mu, cov) d21 = ExponentialDistribution(5)", "3.3, 3.7], [ 2.6, 3.6, 3.3], [ 3.1, 2.2, 1.7],", "model.predict_log_proba(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')])) assert_almost_equal(logs[0][0], -0.89097292388986515) assert_almost_equal(logs[0][1], -1.3609765531356006) assert_almost_equal(logs[0][2],", "weights, y) d = MultivariateGaussianDistribution bc1 = BayesClassifier.from_samples(d, X=X, y=y,", "-1.3609765531356006) assert_almost_equal(logs[0][2], -1.0986122886681096) assert_almost_equal(logs[1][0], -0.93570553121744293) assert_almost_equal(logs[1][1], -1.429425687080494) assert_almost_equal(logs[1][2], -0.9990078376167526) assert_almost_equal(logs[2][0],", "cov1 = model.distributions[0].parameters[1] mu1_t = [0.03333333, 0.28333333, 0.21666666] cov1_t =", "y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict(): y_hat = model.predict(X) y =", "-1.3, nan]]) def setup_multivariate_mixed(): mu, cov = [0, 0, 0],", "0.9272222222, 0.6227777777], [0.927222222, 2.251388888, 1.340277777], [0.622777777, 1.340277777, 0.9547222222]] mu2 =", "-1.53310132e+00], [ -6.93147181e-01, -6.93147181e-01], [ -9.31781101e+00, -8.98143220e-05], [ -6.29755079e-04, -7.37049444e+00],", "= model.predict_log_proba(X_nan) y = [[ -3.99533332e-02, -3.23995333e+00], [ -1.17110067e+00, -3.71100666e-01],", "X_nan = numpy.array([[ 0.3, nan, 0.1], [ nan, 1.4, nan],", "3.3], [ 3.1, nan, 1.7], [ nan, nan, 1.8], [", "2.15865162e-01], [ 5.00000000e-01, 5.00000000e-01], [ 8.98102888e-05, 9.99910190e-01], [ 9.99370443e-01, 6.29556825e-04],", "[0.47937500, 0.53187499, 0.7868750]] assert_array_almost_equal(mu1, mu1_t) assert_array_almost_equal(cov1, cov1_t) assert_array_almost_equal(mu2, mu2_t) assert_array_almost_equal(cov2,", "'H': 0.8, 'T': 0.2 }) ) unrigged = State( DiscreteDistribution({", "= [[ -3.99533332e-02, -3.23995333e+00], [ 
-1.17110067e+00, -3.71100666e-01], [ -4.01814993e+00, -1.81499279e-02],", "MultivariateGaussianDistribution(mu2, cov) bc1 = BayesClassifier([d1, d2]) bc1.fit(X, y, weights) d1", "@with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_to_json(): model2 = BayesClassifier.from_json(model.to_json()) assert_true(isinstance(model2, BayesClassifier)) assert_true(isinstance(model2.distributions[0],", "def test_bc_multivariate_gaussian_nan_predict_log_proba(): y_hat = model.predict_log_proba(X_nan) y = [[ -3.99533332e-02, -3.23995333e+00],", "test_hmm_prediction(): predicts = model.predict(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')])) assert_equal(predicts[0], 0)", "2) @with_setup(setup_multivariate_gaussian, teardown) def test_io_log_probability(): X2 = DataGenerator(X) X3 =", "assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution)) assert_array_almost_equal(model.weights, model2.weights) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_to_json():", "6.10879359e-04]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict_proba_parallel(): y_hat = model.predict_proba(X,", "y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict_proba(): y_hat = model.predict_proba(X) y =", "model.distributions[1].distributions[0] d22 = model.distributions[1].distributions[1] d23 = model.distributions[1].distributions[2] assert_array_almost_equal(mu1, mu1_t) assert_array_almost_equal(cov1,", "[ 1.4, 2.6, nan], [ nan, nan, nan], [ nan,", "assert_almost_equal from nose.tools import assert_equal from nose.tools import assert_not_equal from", "X3 = DataFrameGenerator(pandas.DataFrame(X)) y_hat1 = model.predict_proba(X) y_hat2 = model.predict_proba(X2) y_hat3", "mu, cov = [2, 2, 2], numpy.eye(3) d2 = MultivariateGaussianDistribution(mu,", "mu2_t = [2.925, 2.825, 2.625] cov2_t = [[0.75687499, 0.23687499, 0.4793750],", "@with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_pickle(): model2 = pickle.loads(pickle.dumps(model)) assert_true(isinstance(model2, BayesClassifier)) assert_true(isinstance(model2.distributions[0],", "0.78687500]] assert_array_almost_equal(mu1, mu1_t) assert_array_almost_equal(cov1, cov1_t) assert_array_almost_equal(mu2, mu2_t) assert_array_almost_equal(cov2, cov2_t) @with_setup(setup_multivariate_gaussian,", "assert_equal(predicts[1], 0) assert_equal(predicts[2], 1) assert_equal(predicts[3], 1) assert_equal(predicts[4], 2) @with_setup(setup_multivariate_gaussian, teardown)", "0, 0, 0] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_nan_predict(): y_hat", "= model.predict(X2) y_hat3 = model.predict(X3) assert_array_almost_equal(y_hat1, y_hat2) assert_array_almost_equal(y_hat1, y_hat3) @with_setup(setup_multivariate_gaussian,", "= model.predict_log_proba(X2) y_hat3 = model.predict_log_proba(X3) assert_array_almost_equal(y_hat1, y_hat2) assert_array_almost_equal(y_hat1, y_hat3) def", "[ 7.94751748e-01, 2.05248252e-01], [ 3.09439547e-01, 6.90560453e-01]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown)", "[0.23687499, 0.40187499, 0.53187499], [0.47937500, 0.53187499, 0.78687500]] assert_array_almost_equal(mu1, mu1_t) assert_array_almost_equal(cov1, 
cov1_t)", "nan, nan, 1.8], [ 1.2, 1.8, 1.5], [ nan, 0.3,", "test_bc_multivariate_gaussian_pickle(): model2 = pickle.loads(pickle.dumps(model)) assert_true(isinstance(model2, BayesClassifier)) assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution))", "list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')])) assert_equal(predicts[0], 0) assert_equal(predicts[1], 0) assert_equal(predicts[2], 1) assert_equal(predicts[3],", "nan, nan, nan], [ nan, 3.6, 3.3], [ 3.1, nan,", "0.09743127, 0.90256873], [ 0.99693828, 0.00306172], [ 0.90616916, 0.09383084], [ 0.76969251,", "teardown) def test_bc_multivariate_mixed_pickle(): model2 = pickle.loads(pickle.dumps(model)) assert_true(isinstance(model2, BayesClassifier)) assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))", "-1.86355320e-01, -1.77183117e+00], [ -5.58542088e-01, -8.48731256e-01], [ -7.67315597e-01, -6.24101927e-01], [ -2.32860808e+00,", "-1.8, -1.5], [-1.8, 0.3, 0.5], [ 0.7, -1.3, -0.1]]) global", "assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution)) assert_array_almost_equal(model.weights, model2.weights) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_robust_from_json(): model2 =", "0.5) hmm3.bake() model = BayesClassifier([hmm1, hmm2, hmm3]) def setup_multivariate(): pass", "teardown) def test_io_predict_proba(): X2 = DataGenerator(X) X3 = DataFrameGenerator(pandas.DataFrame(X)) y_hat1", "y_hat = model.predict_log_proba(X, n_jobs=2) y = [[ -5.03107596e-01, -9.27980626e-01], [", "0, 0, 1, 1, 1, 1, 0, 0, 0] global", "np nan = numpy.nan def setup_multivariate_gaussian(): mu, cov = [0,", "assert_equal(predicts[4], 2) @with_setup(setup_multivariate_gaussian, teardown) def test_io_log_probability(): X2 = DataGenerator(X) X3", "model.distributions[0].parameters[0] cov1 = model.distributions[0].parameters[1] mu1_t = [0.03333333, 0.2833333, 0.21666666] cov1_t", "-1.46833995e+00], [ -2.01640009e-03, -6.20744952e+00], [ -1.47371167e-01, -1.98758175e+00]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian,", "1, 0, 0, 0] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict_parallel():", "(division) from pomegranate import * from pomegranate.io import DataGenerator from", "test_bc_multivariate_gaussian_predict_log_proba_parallel(): y_hat = model.predict_log_proba(X, n_jobs=2) y = [[ -1.48842547e-02, -4.21488425e+00],", "y = [0, 1, 0, 0, 1, 0, 1, 0,", "numpy.eye(5) d1 = MultivariateGaussianDistribution(mu1, cov) d2 = MultivariateGaussianDistribution(mu2, cov) bc1", "-3.69104343e-03], [ -3.05902274e-07, -1.50000003e+01], [ -3.35406373e-04, -8.00033541e+00], [ -6.11066022e-04, -7.40061107e+00]]", "[ 9.99389121e-01, 6.10879359e-04]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict_proba(): y_hat", "teardown) def test_bc_multivariate_mixed_predict_log_proba_parallel(): y_hat = model.predict_log_proba(X, n_jobs=2) y = [[", "assert_almost_equal(logs[4][0], -1.3058441172130273) assert_almost_equal(logs[4][1], -1.4007102236822906) assert_almost_equal(logs[4][2], -0.7284958836972919) @with_setup(setup_hmm, teardown) def test_hmm_proba():", "nan, -1.3, nan]]) def setup_multivariate_mixed(): mu, cov = [0, 0,", "-3.23995333e+00], [ -1.17110067e+00, -3.71100666e-01], [ -4.01814993e+00, 
-1.81499279e-02], [ -6.93147181e-01, -6.93147181e-01],", "-1.48842547e-02, -4.21488425e+00], [ -4.37487950e-01, -1.03748795e+00], [ -5.60369104e+00, -3.69104343e-03], [ -1.64000001e+01,", "= [1.033333, 1.3166667, 0.75] cov1_t = [[0.242222, 0.0594444, 0.178333], [0.059444,", "model.distributions[1].parameters[0] cov2 = model.distributions[1].parameters[1] mu2_t = [2.925, 2.825, 2.625] cov2_t", "y = numpy.random.randint(2, size=100) data_generator = DataGenerator(X, weights, y) d", "1, 0, 1, 1, 1, 0, 0, 0] assert_array_almost_equal(y, y_hat)", "assert_array_almost_equal(cov1, cov1_t) assert_array_almost_equal(mu2, mu2_t) assert_array_almost_equal(cov2, cov2_t) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_fit_parallel():", "test_bc_multivariate_gaussian_nan_predict_proba(): y_hat = model.predict_proba(X_nan) y = [[ 9.60834277e-01, 3.91657228e-02], [", "@with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict_log_proba_parallel(): y_hat = model.predict_log_proba(X, n_jobs=2) y =", "-3.71100666e-01], [ -4.01814993e+00, -1.81499279e-02], [ -6.93147181e-01, -6.93147181e-01], [ -9.80005545e+00, -5.54500620e-05],", "assert_equal(predicts[3], 1) assert_equal(predicts[4], 2) @with_setup(setup_multivariate_gaussian, teardown) def test_io_log_probability(): X2 =", "[ 0.7, -1.3, -0.1]]) global y y = [0, 0,", "model.predict(X, n_jobs=2) y = [0, 0, 0, 1, 1, 0,", "1.8, 2.2, 1.8], [-1.2, -1.8, -1.5], [-1.8, 0.3, 0.5], [", "[ 3.10025519e-01, 6.89974481e-01], [ 1.79862100e-02, 9.82013790e-01], [ 5.00000000e-01, 5.00000000e-01], [", "0.1], [ nan, 1.4, nan], [ 1.4, 2.6, nan], [", "@with_setup(setup_multivariate_gaussian, teardown) def test_io_predict_log_proba(): X2 = DataGenerator(X) X3 = DataFrameGenerator(pandas.DataFrame(X))", "list('THTHTHTHTHTH'), list('THTHHHHHTHTH')])) assert_equal(predicts[0], 0) assert_equal(predicts[1], 0) assert_equal(predicts[2], 1) assert_equal(predicts[3], 1)", "[2.925, 2.825, 2.625] cov2_t = [[0.75687500, 0.23687499, 0.47937500], [0.23687499, 0.40187499,", "-3.1703366478831185) assert_almost_equal(logs[3][1], -0.49261403211260379) assert_almost_equal(logs[3][2], -1.058478108940049) assert_almost_equal(logs[4][0], -1.3058441172130273) assert_almost_equal(logs[4][1], -1.4007102236822906) assert_almost_equal(logs[4][2],", "cov1_t) assert_array_almost_equal(d21.parameters, [0.34188034]) assert_array_almost_equal(d22.parameters, [1.01294275, 0.22658346]) assert_array_almost_equal(d23.parameters, [2.625]) @with_setup(setup_multivariate_gaussian, teardown)", "BayesClassifier.from_json(model.to_json()) assert_true(isinstance(model2, BayesClassifier)) assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution)) assert_array_almost_equal(model.weights, model2.weights) @with_setup(setup_multivariate_gaussian,", "y = [0, 0, 1, 1, 1, 1, 1, 0,", "model.distributions[0].parameters[1] mu1_t = [0.03333333, 0.28333333, 0.21666666] cov1_t = [[1.3088888, 0.9272222,", "assert_almost_equal(probs[1][0], 0.39230898163446098) assert_almost_equal(probs[1][1], 0.23944639992337707) assert_almost_equal(probs[1][2], 0.36824461844216183) assert_almost_equal(probs[2][0], 0.020225961918306088) assert_almost_equal(probs[2][1], 0.79007663743383105)", "teardown) def test_bc_multivariate_gaussian_from_samples(): model = BayesClassifier.from_samples(MultivariateGaussianDistribution, X, y) mu1 =", "= [[ 9.60834277e-01, 3.91657228e-02], [ 
3.10025519e-01, 6.89974481e-01], [ 1.79862100e-02, 9.82013790e-01],", "= model.log_probability(X3) assert_array_almost_equal(logp1, logp2) assert_array_almost_equal(logp1, logp3) @with_setup(setup_multivariate_gaussian, teardown) def test_io_predict():", "nan]]) def setup_hmm(): global model global hmm1 global hmm2 global", "3.35350130e-04], [ 9.99389121e-01, 6.10879359e-04]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict_proba_parallel():", "= DataFrameGenerator(pandas.DataFrame(X)) logp1 = model.log_probability(X) logp2 = model.log_probability(X2) logp3 =", "n_jobs=2) y = [[ -5.03107596e-01, -9.27980626e-01], [ -1.86355320e-01, -1.77183117e+00], [", "}) ) hmm1 = HiddenMarkovModel() hmm1.start = rigged hmm1.add_transition(rigged, rigged,", "-5.03107596e-01, -9.27980626e-01], [ -1.86355320e-01, -1.77183117e+00], [ -5.58542088e-01, -8.48731256e-01], [ -7.67315597e-01,", "3.68423990e-03, 9.96315760e-01], [ 9.99999694e-01, 3.05902227e-07], [ 9.99664650e-01, 3.35350130e-04], [ 9.99389121e-01,", "False) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_initialization(): assert_equal(model.d, 3) assert_equal(model.n, 2) assert_equal(model.is_vl_,", "MultivariateGaussianDistribution)) assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution)) assert_array_almost_equal(model.weights, model2.weights) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_robust_from_json(): model2", "assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict_proba(): y_hat = model.predict_proba(X) y", "[0.178333, 0.4141666, 0.439166]] d21 = model.distributions[1].distributions[0] d22 = model.distributions[1].distributions[1] d23", "[ 9.99389121e-01, 6.10879359e-04]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict_proba_parallel(): y_hat", "1.7], [ 1.8, 2.2, 1.8], [-1.2, -1.8, -1.5], [-1.8, 0.3,", "9.99997740e-01], [ 3.35350130e-04, 9.99664650e-01], [ 3.68423990e-03, 9.96315760e-01], [ 9.99999694e-01, 3.05902227e-07],", "assert_array_almost_equal(logp1, logp2) assert_array_almost_equal(logp1, logp3) @with_setup(setup_multivariate_gaussian, teardown) def test_io_predict(): X2 =", "= [0, 1, 0, 0, 1, 0, 1, 0, 0,", "[0, 0, 0], numpy.eye(3) d1 = MultivariateGaussianDistribution(mu, cov) mu, cov", "0.1]]) global y y = [0, 0, 0, 1, 1,", "= MultivariateGaussianDistribution(mu2, cov) bc2 = BayesClassifier([d1, d2]) bc2.fit(data_generator) logp1 =", "cov1_t = [[1.308888888, 0.9272222222, 0.6227777777], [0.927222222, 2.251388888, 1.340277777], [0.622777777, 1.340277777,", "from nose.tools import assert_almost_equal from nose.tools import assert_equal from nose.tools", "= model.predict(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')])) assert_equal(predicts[0], 0) assert_equal(predicts[1], 0)", "-6.20744952e+00], [ -1.47371167e-01, -1.98758175e+00]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict_proba():", "= IndependentComponentsDistribution([d21, d22, d23]) global model model = BayesClassifier([d1, d2])", "-4.21488425e+00], [ -4.37487950e-01, -1.03748795e+00], [ -5.60369104e+00, -3.69104343e-03], [ -1.64000001e+01, -7.54345812e-08],", "= DataGenerator(X) X3 = DataFrameGenerator(pandas.DataFrame(X)) y_hat1 = 
model.predict_log_proba(X) y_hat2 =", "from nose.tools import assert_true from numpy.testing import assert_array_almost_equal import pandas", "2.251388888, 1.340277777], [0.622777777, 1.340277777, 0.9547222222]] mu2 = model.distributions[1].parameters[0] cov2 =", "0.5], [ 1.4, 2.6, 1.8], [ 4.2, 3.3, 3.7], [", "1, 0, 0, 1, 0, 1, 0, 0, 1] assert_array_almost_equal(y,", "= DataGenerator(X) X3 = DataFrameGenerator(pandas.DataFrame(X)) y_hat1 = model.predict_proba(X) y_hat2 =", "@with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_robust_from_json(): model2 = from_json(model.to_json()) assert_true(isinstance(model2, BayesClassifier)) assert_true(isinstance(model2.distributions[0],", "y_hat3 = model.predict_log_proba(X3) assert_array_almost_equal(y_hat1, y_hat2) assert_array_almost_equal(y_hat1, y_hat3) def test_io_fit(): X", "[ 3.1, nan, 1.7], [ nan, nan, 1.8], [-1.2, -1.8,", "import assert_equal from nose.tools import assert_not_equal from nose.tools import assert_less_equal", "0.90616916, 0.09383084], [ 0.76969251, 0.23030749], [ 0.99798563, 0.00201437], [ 0.86297361,", "def test_bc_multivariate_gaussian_predict_proba_parallel(): y_hat = model.predict_proba(X, n_jobs=2) y = [[ 9.85225968e-01,", "= [2.925, 2.825, 2.625] cov2_t = [[0.75687499, 0.23687499, 0.4793750], [0.23687499,", "[ 3.35350130e-04, 9.99664650e-01], [ 3.68423990e-03, 9.96315760e-01], [ 9.99999694e-01, 3.05902227e-07], [", "= model.predict(X) y_hat2 = model.predict(X2) y_hat3 = model.predict(X3) assert_array_almost_equal(y_hat1, y_hat2)", "0.57204244, 0.42795756], [ 0.46425765, 0.53574235], [ 0.09743127, 0.90256873], [ 0.99693828,", "import assert_array_almost_equal import pandas import random import pickle import numpy", "nan, 1.3, nan]]) def setup_hmm(): global model global hmm1 global", "0.020225961918306088) assert_almost_equal(probs[2][1], 0.79007663743383105) assert_almost_equal(probs[2][2], 0.18969740064786292) assert_almost_equal(probs[3][0], 0.041989459861032523) assert_almost_equal(probs[3][1], 0.61102706038265642) assert_almost_equal(probs[3][2],", "mu1_t) assert_array_almost_equal(cov1, cov1_t) assert_array_almost_equal(mu2, mu2_t) assert_array_almost_equal(cov2, cov2_t) @with_setup(setup_multivariate_mixed, teardown) def", "[1.033333, 1.3166667, 0.75] cov1_t = [[0.242222, 0.0594444, 0.178333], [0.059444, 0.5980555,", "import assert_almost_equal from nose.tools import assert_equal from nose.tools import assert_not_equal", "y = numpy.random.randint(2, size=100) data_generator = DataGenerator(X, weights, y) mu1", "2) assert_equal(model.is_vl_, False) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict_log_proba(): y_hat = model.predict_log_proba(X)", "9.99389121e-01, 6.10879359e-04]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict_proba(): y_hat =", "model = BayesClassifier.from_samples(MultivariateGaussianDistribution, X, y) mu1 = model.distributions[0].parameters[0] cov1 =", "[ -1.17299253e+00, -3.70251760e-01]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict_log_proba_parallel(): y_hat", "[ -1.86355320e-01, -1.77183117e+00], [ -5.58542088e-01, -8.48731256e-01], [ -7.67315597e-01, -6.24101927e-01], [", "y_hat2 = model.predict_proba(X2) y_hat3 = model.predict_proba(X3) assert_array_almost_equal(y_hat1, y_hat2) assert_array_almost_equal(y_hat1, y_hat3)", "[ 1.8, 2.2, 1.8], [ 1.2, 1.8, 1.5], [ 
1.8,", "d2 = MultivariateGaussianDistribution(mu, cov) global model model = BayesClassifier([d1, d2])", "= model.predict_log_proba(X_nan) y = [[ -3.57980882e-01, -1.20093223e+00], [ -1.20735130e+00, -3.55230506e-01],", "test_io_predict_proba(): X2 = DataGenerator(X) X3 = DataFrameGenerator(pandas.DataFrame(X)) y_hat1 = model.predict_proba(X)", "IndependentComponentsDistribution)) assert_array_almost_equal(model.weights, model2.weights) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_to_json(): model2 = BayesClassifier.from_json(model.to_json())", "X, y) mu1 = model.distributions[0].parameters[0] cov1 = model.distributions[0].parameters[1] mu1_t =", "7.94751748e-01, 2.05248252e-01], [ 3.09439547e-01, 6.90560453e-01]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def", "test_bc_multivariate_mixed_pickle(): model2 = pickle.loads(pickle.dumps(model)) assert_true(isinstance(model2, BayesClassifier)) assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution))", "3) assert_equal(model.n, 2) assert_equal(model.is_vl_, False) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_initialization(): assert_equal(model.d,", "-0.6931471805599453 ) assert_almost_equal(hmm2.log_probability(list('HHHH')), -2.772588722239781 ) assert_almost_equal(hmm2.log_probability(list('THHH')), -2.772588722239781 ) assert_almost_equal(hmm2.log_probability(list('TTTT')), -2.772588722239781", "rigged hmm1.add_transition(rigged, rigged, 1) hmm1.bake() hmm2 = HiddenMarkovModel() hmm2.start =", "def test_bc_multivariate_gaussian_pickle(): model2 = pickle.loads(pickle.dumps(model)) assert_true(isinstance(model2, BayesClassifier)) assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(isinstance(model2.distributions[1],", "2.26032430e-06, 9.99997740e-01], [ 3.35350130e-04, 9.99664650e-01], [ 3.68423990e-03, 9.96315760e-01], [ 9.99999694e-01,", "cov) d2 = MultivariateGaussianDistribution(mu2, cov) bc2 = BayesClassifier([d1, d2]) bc2.fit(data_generator)", "test_bc_multivariate_mixed_nan_predict(): y_hat = model.predict(X_nan) y = [0, 1, 0, 0,", "6.89974481e-01], [ 1.79862100e-02, 9.82013790e-01], [ 5.00000000e-01, 5.00000000e-01], [ 5.54485247e-05, 9.99944551e-01],", "[ 2.98988163e-01, 7.01011837e-01], [ 7.84134838e-01, 2.15865162e-01], [ 5.00000000e-01, 5.00000000e-01], [", "[ -1.64000001e+01, -7.54345812e-08], [ -1.30000023e+01, -2.26032685e-06], [ -8.00033541e+00, -3.35406373e-04], [", "1) @with_setup(setup_hmm, teardown) def test_hmm_log_proba(): logs = model.predict_log_proba(np.array([list('H'), list('THHH'), list('TTTT'),", "BayesClassifier.from_samples(MultivariateGaussianDistribution, X, y) mu1 = model.distributions[0].parameters[0] cov1 = model.distributions[0].parameters[1] mu1_t", "-1.17110067e+00, -3.71100666e-01], [ -4.01814993e+00, -1.81499279e-02], [ -6.93147181e-01, -6.93147181e-01], [ -9.80005545e+00,", "-3.69104343e-03], [ -1.64000001e+01, -7.54345812e-08], [ -1.30000023e+01, -2.26032685e-06], [ -8.00033541e+00, -3.35406373e-04],", "@with_setup(setup_hmm, teardown) def test_hmm_prediction(): predicts = model.predict(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'),", "[ nan, nan, nan], [ nan, 3.6, 3.3], [ 3.1,", "[0, 1, 0, 0, 1, 0, 1, 0, 0, 1]", "1, 1, 1, 1]) cov = numpy.eye(5) d1 = MultivariateGaussianDistribution(mu1,", "0.7868750]] 
assert_array_almost_equal(mu1, mu1_t) assert_array_almost_equal(cov1, cov1_t) assert_array_almost_equal(mu2, mu2_t) assert_array_almost_equal(cov2, cov2_t) @with_setup(setup_multivariate_mixed,", "import assert_not_equal from nose.tools import assert_less_equal from nose.tools import assert_raises", "assert_equal(model.n, 2) assert_equal(model.is_vl_, False) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict_log_proba(): y_hat =", "logp1 = model.log_probability(X) logp2 = model.log_probability(X2) logp3 = model.log_probability(X3) assert_array_almost_equal(logp1,", "DataGenerator(X, weights, y) d = MultivariateGaussianDistribution bc1 = BayesClassifier.from_samples(d, X=X,", "-1.6094379124341003 ) assert_almost_equal(hmm1.log_probability(list('HHHH')), -0.8925742052568388 ) assert_almost_equal(hmm1.log_probability(list('THHH')), -2.2788685663767296 ) assert_almost_equal(hmm1.log_probability(list('TTTT')), -6.437751649736401", "assert_almost_equal(probs[2][1], 0.79007663743383105) assert_almost_equal(probs[2][2], 0.18969740064786292) assert_almost_equal(probs[3][0], 0.041989459861032523) assert_almost_equal(probs[3][1], 0.61102706038265642) assert_almost_equal(probs[3][2], 0.346983479756311)", "[ nan, 0.3, 0.5], [ nan, 1.3, nan]]) def setup_hmm():", "0.5], [ 0.7, 1.3, 0.1]]) global y y = [0,", "nan, 1.7], [ nan, nan, 1.8], [-1.2, -1.8, -1.5], [", "y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_nan_predict_log_proba(): y_hat = model.predict_log_proba(X_nan) y =", "data_generator = DataGenerator(X, weights, y) d = MultivariateGaussianDistribution bc1 =", "7.54345778e-08, 9.99999925e-01], [ 2.26032430e-06, 9.99997740e-01], [ 3.35350130e-04, 9.99664650e-01], [ 3.68423990e-03,", "-2.772588722239781 ) assert_almost_equal(hmm2.log_probability(list('TTTT')), -2.772588722239781 ) assert_almost_equal(hmm3.log_probability(list('H')), -0.43078291609245417) assert_almost_equal(hmm3.log_probability(list('T')), -1.0498221244986776) assert_almost_equal(hmm3.log_probability(list('HHHH')),", "-6.24101927e-01], [ -2.32860808e+00, -1.02510436e-01], [ -3.06641866e-03, -5.78877778e+00], [ -9.85292840e-02, -2.36626165e+00],", "0.23944639992337707) assert_almost_equal(probs[1][2], 0.36824461844216183) assert_almost_equal(probs[2][0], 0.020225961918306088) assert_almost_equal(probs[2][1], 0.79007663743383105) assert_almost_equal(probs[2][2], 0.18969740064786292) assert_almost_equal(probs[3][0],", "def setup_multivariate_mixed(): mu, cov = [0, 0, 0], numpy.eye(3) d1", "rigged, 1) hmm1.bake() hmm2 = HiddenMarkovModel() hmm2.start = unrigged hmm2.add_transition(unrigged,", "assert_almost_equal(logs[3][2], -1.058478108940049) assert_almost_equal(logs[4][0], -1.3058441172130273) assert_almost_equal(logs[4][1], -1.4007102236822906) assert_almost_equal(logs[4][2], -0.7284958836972919) @with_setup(setup_hmm, teardown)", "X2 = DataGenerator(X) X3 = DataFrameGenerator(pandas.DataFrame(X)) y_hat1 = model.predict(X) y_hat2", "hmm3.add_transition(unrigged, unrigged, 0.5) hmm3.bake() model = BayesClassifier([hmm1, hmm2, hmm3]) def", "rigged, 0.5) hmm3.add_transition(unrigged, unrigged, 0.5) hmm3.bake() model = BayesClassifier([hmm1, hmm2,", "[ 0.90616916, 0.09383084], [ 0.76969251, 0.23030749], [ 0.99798563, 0.00201437], [", "d2]) bc1.fit(X, y, weights) d1 = MultivariateGaussianDistribution(mu1, cov) d2 =", "assert_raises from nose.tools import assert_true from numpy.testing import assert_array_almost_equal import", "-4.01814993e+00, 
-1.81499279e-02], [ -6.93147181e-01, -6.93147181e-01], [ -9.80005545e+00, -5.54500620e-05], [ -5.60369104e+00,", "MultivariateGaussianDistribution)) assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution)) assert_array_almost_equal(model.weights, model2.weights) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_to_json(): model2", "[ 1.2, 1.8, 1.5], [ 1.8, 0.3, 0.5], [ 0.7,", "DataGenerator(X) X3 = DataFrameGenerator(pandas.DataFrame(X)) logp1 = model.log_probability(X) logp2 = model.log_probability(X2)", "model = BayesClassifier([d1, d2]) global X X = numpy.array([[ 0.3,", "@with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_pickle(): model2 = pickle.loads(pickle.dumps(model)) assert_true(isinstance(model2, BayesClassifier)) assert_true(isinstance(model2.distributions[0],", "0.00201437], [ 0.86297361, 0.13702639]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict():", "y_hat3 = model.predict_proba(X3) assert_array_almost_equal(y_hat1, y_hat2) assert_array_almost_equal(y_hat1, y_hat3) @with_setup(setup_multivariate_gaussian, teardown) def", "def test_io_log_probability(): X2 = DataGenerator(X) X3 = DataFrameGenerator(pandas.DataFrame(X)) logp1 =", "y_hat2) assert_array_almost_equal(y_hat1, y_hat3) def test_io_fit(): X = numpy.random.randn(100, 5) +", "d1 = MultivariateGaussianDistribution(mu, cov) d21 = ExponentialDistribution(5) d22 = LogNormalDistribution(0.2,", "= pickle.loads(pickle.dumps(model)) assert_true(isinstance(model2, BayesClassifier)) assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution)) assert_array_almost_equal(model.weights, model2.weights)", "= [0, 0, 0], numpy.eye(3) d1 = MultivariateGaussianDistribution(mu, cov) d21", "nan, 1.4, nan], [ 1.4, 2.6, nan], [ nan, nan,", "9.90048198e-01, 9.95180187e-03]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_nan_predict_proba(): y_hat =", "bc1.log_probability(X) logp2 = bc2.log_probability(X) assert_array_almost_equal(logp1, logp2) def test_io_from_samples(): X =", "-1.5], [-1.8, 0.3, 0.5], [ 0.7, -1.3, -0.1]]) global y", "-1.3058441172130273) assert_almost_equal(logs[4][1], -1.4007102236822906) assert_almost_equal(logs[4][2], -0.7284958836972919) @with_setup(setup_hmm, teardown) def test_hmm_proba(): probs", "model2.weights) @with_setup(setup_hmm, teardown) def test_model(): assert_almost_equal(hmm1.log_probability(list('H')), -0.2231435513142097 ) assert_almost_equal(hmm1.log_probability(list('T')), -1.6094379124341003", "global hmm3 rigged = State( DiscreteDistribution({ 'H': 0.8, 'T': 0.2", "[ 0.86297361, 0.13702639]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_nan_predict_proba(): y_hat", "-1.0986122886681096) assert_almost_equal(logs[1][0], -0.93570553121744293) assert_almost_equal(logs[1][1], -1.429425687080494) assert_almost_equal(logs[1][2], -0.9990078376167526) assert_almost_equal(logs[2][0], -3.9007882563128864) assert_almost_equal(logs[2][1],", "[ 0.99693828, 0.00306172], [ 0.90616916, 0.09383084], [ 0.76969251, 0.23030749], [", "global X_nan X_nan = numpy.array([[ 0.3, nan, 0.1], [ nan,", "import pickle import numpy as np nan = numpy.nan def", "import random import pickle import numpy as np nan =", "0, 
0]) mu2 = numpy.array([1, 1, 1, 1, 1]) cov", "y_hat = model.predict_proba(X) y = [[ 0.60464873, 0.39535127], [ 0.82997863,", "hmm3 = HiddenMarkovModel() hmm3.add_transition(hmm3.start, unrigged, 0.5) hmm3.add_transition(hmm3.start, rigged, 0.5) hmm3.add_transition(rigged,", "2], numpy.eye(3) d2 = MultivariateGaussianDistribution(mu, cov) global model model =", "[0.03333333, 0.2833333, 0.21666666] cov1_t = [[1.308888888, 0.9272222222, 0.6227777777], [0.927222222, 2.251388888,", "-1.3, -0.1]]) global y y = [0, 0, 0, 1,", "0.5 weights = numpy.abs(numpy.random.randn(100)) y = numpy.random.randint(2, size=100) data_generator =", "@with_setup(setup_multivariate_gaussian, teardown) def test_io_log_probability(): X2 = DataGenerator(X) X3 = DataFrameGenerator(pandas.DataFrame(X))", "assert_almost_equal(logs[0][1], -1.3609765531356006) assert_almost_equal(logs[0][2], -1.0986122886681096) assert_almost_equal(logs[1][0], -0.93570553121744293) assert_almost_equal(logs[1][1], -1.429425687080494) assert_almost_equal(logs[1][2], -0.9990078376167526)", "-1.8, -1.5], [ nan, 0.3, 0.5], [ nan, -1.3, nan]])", "[ 2.68992964e-01, 7.31007036e-01], [ 7.69692511e-01, 2.30307489e-01], [ 7.94751748e-01, 2.05248252e-01], [", "teardown) def test_bc_multivariate_mixed_predict_proba(): y_hat = model.predict_proba(X) y = [[ 0.60464873,", "def test_bc_multivariate_gaussian_predict_parallel(): y_hat = model.predict(X, n_jobs=2) y = [0, 0,", "model.predict_log_proba(X) y_hat2 = model.predict_log_proba(X2) y_hat3 = model.predict_log_proba(X3) assert_array_almost_equal(y_hat1, y_hat2) assert_array_almost_equal(y_hat1,", "@with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_nan_predict_log_proba(): y_hat = model.predict_log_proba(X_nan) y = [[", "1] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict_parallel(): y_hat = model.predict(X,", "[[ -3.99533332e-02, -3.23995333e+00], [ -1.17110067e+00, -3.71100666e-01], [ -4.01814993e+00, -1.81499279e-02], [", "y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict_log_proba(): y_hat = model.predict_log_proba(X) y =", "5.00000000e-01, 5.00000000e-01], [ 5.54485247e-05, 9.99944551e-01], [ 3.68423990e-03, 9.96315760e-01], [ 1.67981615e-01,", ") assert_almost_equal(hmm2.log_probability(list('T')), -0.6931471805599453 ) assert_almost_equal(hmm2.log_probability(list('HHHH')), -2.772588722239781 ) assert_almost_equal(hmm2.log_probability(list('THHH')), -2.772588722239781 )", "3.3], [ 3.1, 2.2, 1.7], [ 1.8, 2.2, 1.8], [-1.2,", "HiddenMarkovModel() hmm1.start = rigged hmm1.add_transition(rigged, rigged, 1) hmm1.bake() hmm2 =", "test_io_from_samples(): X = numpy.random.randn(100, 5) + 0.5 weights = numpy.abs(numpy.random.randn(100))", "= numpy.random.randint(2, size=100) data_generator = DataGenerator(X, weights, y) d =", ") assert_almost_equal(hmm1.log_probability(list('T')), -1.6094379124341003 ) assert_almost_equal(hmm1.log_probability(list('HHHH')), -0.8925742052568388 ) assert_almost_equal(hmm1.log_probability(list('THHH')), -2.2788685663767296 )", "0] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_nan_predict(): y_hat = model.predict(X_nan)", "assert_equal(model.n, 2) assert_equal(model.is_vl_, False) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_initialization(): assert_equal(model.d, 3)", "[ -3.05902274e-07, -1.50000003e+01], [ 
-8.68361522e-02, -2.48683615e+00], [ -1.00016521e-02, -4.61000165e+00]] assert_array_almost_equal(y,", "PoissonDistribution(3) d2 = IndependentComponentsDistribution([d21, d22, d23]) global model model =", "X = numpy.array([[ 0.3, 0.5, 0.1], [ 0.8, 1.4, 0.5],", "assert_almost_equal(logs[4][1], -1.4007102236822906) assert_almost_equal(logs[4][2], -0.7284958836972919) @with_setup(setup_hmm, teardown) def test_hmm_proba(): probs =", "@with_setup(setup_hmm, teardown) def test_hmm_proba(): probs = model.predict_proba(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'),", "y = [[ -1.48842547e-02, -4.21488425e+00], [ -4.37487950e-01, -1.03748795e+00], [ -5.60369104e+00,", "[0.23687499, 0.40187499, 0.5318749], [0.47937500, 0.53187499, 0.7868750]] assert_array_almost_equal(mu1, mu1_t) assert_array_almost_equal(cov1, cov1_t)", "1, 1, 1, 0, 0, 0] global X_nan X_nan =", "-0.43078291609245417) assert_almost_equal(hmm3.log_probability(list('T')), -1.0498221244986776) assert_almost_equal(hmm3.log_probability(list('HHHH')), -1.7231316643698167) assert_almost_equal(hmm3.log_probability(list('THHH')), -2.3421708727760397) assert_almost_equal(hmm3.log_probability(list('TTTT')), -4.1992884979947105) assert_almost_equal(hmm3.log_probability(list('THTHTHTHTHTH')),", "0.41025641025641024) assert_almost_equal(probs[0][1], 0.25641025641025639) assert_almost_equal(probs[0][2], 0.33333333333333331) assert_almost_equal(probs[1][0], 0.39230898163446098) assert_almost_equal(probs[1][1], 0.23944639992337707) assert_almost_equal(probs[1][2],", "y_hat3) @with_setup(setup_multivariate_gaussian, teardown) def test_io_predict_log_proba(): X2 = DataGenerator(X) X3 =", "nose.tools import with_setup from nose.tools import assert_almost_equal from nose.tools import", "assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution)) assert_array_almost_equal(model.weights, model2.weights) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_robust_from_json(): model2 =", "y_hat3 = model.predict(X3) assert_array_almost_equal(y_hat1, y_hat2) assert_array_almost_equal(y_hat1, y_hat3) @with_setup(setup_multivariate_gaussian, teardown) def", "y, n_jobs=2) mu1 = model.distributions[0].parameters[0] cov1 = model.distributions[0].parameters[1] mu1_t =", "list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')])) assert_almost_equal(probs[0][0], 0.41025641025641024) assert_almost_equal(probs[0][1], 0.25641025641025639) assert_almost_equal(probs[0][2], 0.33333333333333331)", "assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict_parallel(): y_hat = model.predict(X, n_jobs=2)", "1, 1, 1, 1, 1, 0, 0, 0] assert_array_almost_equal(y, y_hat)", "def test_bc_multivariate_mixed_to_json(): model2 = BayesClassifier.from_json(model.to_json()) assert_true(isinstance(model2, BayesClassifier)) assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(isinstance(model2.distributions[1],", "= model.log_probability(X2) logp3 = model.log_probability(X3) assert_array_almost_equal(logp1, logp2) assert_array_almost_equal(logp1, logp3) @with_setup(setup_multivariate_gaussian,", "[ 1.4, 2.6, 1.8], [ 4.2, 3.3, 3.7], [ 2.6,", "0] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict(): y_hat = model.predict(X)", "-6.93147181e-01], [ -9.31781101e+00, -8.98143220e-05], [ -6.29755079e-04, -7.37049444e+00], [ 
-1.31307006e+00, -3.13332194e-01],", "logp1 = bc1.log_probability(X) logp2 = bc2.log_probability(X) assert_array_almost_equal(logp1, logp2) def test_io_from_samples():", "@with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict_log_proba(): y_hat = model.predict_log_proba(X) y = [[", "[[ -5.03107596e-01, -9.27980626e-01], [ -1.86355320e-01, -1.77183117e+00], [ -5.58542088e-01, -8.48731256e-01], [", "= model.predict_log_proba(X) y = [[ -1.48842547e-02, -4.21488425e+00], [ -4.37487950e-01, -1.03748795e+00],", "0.00306172], [ 0.90616916, 0.09383084], [ 0.76969251, 0.23030749], [ 0.99798563, 0.00201437],", "hmm3.bake() model = BayesClassifier([hmm1, hmm2, hmm3]) def setup_multivariate(): pass def", "9.99389121e-01, 6.10879359e-04]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict_proba_parallel(): y_hat =", "[ -8.00033541e+00, -3.35406373e-04], [ -5.60369104e+00, -3.69104343e-03], [ -3.05902274e-07, -1.50000003e+01], [", "-8.883630243546788) assert_almost_equal(hmm3.log_probability(list('THTHHHHHTHTH')), -7.645551826734343) assert_equal(model.d, 1) @with_setup(setup_hmm, teardown) def test_hmm_log_proba(): logs", "@with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict(): y_hat = model.predict(X) y = [0,", "= numpy.array([0, 0, 0, 0, 0]) mu2 = numpy.array([1, 1,", "3.6, 3.3], [ 3.1, nan, 1.7], [ nan, nan, 1.8],", "numpy.abs(numpy.random.randn(100)) y = numpy.random.randint(2, size=100) data_generator = DataGenerator(X, weights, y)", "model.predict_log_proba(X) y = [[ -1.48842547e-02, -4.21488425e+00], [ -4.37487950e-01, -1.03748795e+00], [", "= BayesClassifier.from_samples(MultivariateGaussianDistribution, X, y) mu1 = model.distributions[0].parameters[0] cov1 = model.distributions[0].parameters[1]", "assert_almost_equal(logs[3][0], -3.1703366478831185) assert_almost_equal(logs[3][1], -0.49261403211260379) assert_almost_equal(logs[3][2], -1.058478108940049) assert_almost_equal(logs[4][0], -1.3058441172130273) assert_almost_equal(logs[4][1], -1.4007102236822906)", "def test_io_predict_proba(): X2 = DataGenerator(X) X3 = DataFrameGenerator(pandas.DataFrame(X)) y_hat1 =", "rigged = State( DiscreteDistribution({ 'H': 0.8, 'T': 0.2 }) )", "def test_bc_multivariate_mixed_robust_from_json(): model2 = from_json(model.to_json()) assert_true(isinstance(model2, BayesClassifier)) assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(isinstance(model2.distributions[1],", "model2.weights) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_to_json(): model2 = BayesClassifier.from_json(model.to_json()) assert_true(isinstance(model2, BayesClassifier))", "y_hat = model.predict_proba(X, n_jobs=2) y = [[ 0.60464873, 0.39535127], [", "0.041989459861032523) assert_almost_equal(probs[3][1], 0.61102706038265642) assert_almost_equal(probs[3][2], 0.346983479756311) assert_almost_equal(probs[4][0], 0.27094373022369794) assert_almost_equal(probs[4][1], 0.24642188711704707) assert_almost_equal(probs[4][2],", "test_bc_multivariate_mixed_robust_from_json(): model2 = from_json(model.to_json()) assert_true(isinstance(model2, BayesClassifier)) assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution))", "= [2, 2, 2], numpy.eye(3) d2 = MultivariateGaussianDistribution(mu, cov) global", "0.5) 
hmm3.add_transition(unrigged, unrigged, 0.5) hmm3.bake() model = BayesClassifier([hmm1, hmm2, hmm3])", "= from_json(model.to_json()) assert_true(isinstance(model2, BayesClassifier)) assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution)) assert_array_almost_equal(model.weights, model2.weights)", "[[ 9.85225968e-01, 1.47740317e-02], [ 6.45656306e-01, 3.54343694e-01], [ 3.68423990e-03, 9.96315760e-01], [", "0, 0, 0, 0] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_fit_parallel():", "hmm3.add_transition(hmm3.start, rigged, 0.5) hmm3.add_transition(rigged, rigged, 0.5) hmm3.add_transition(rigged, unrigged, 0.5) hmm3.add_transition(unrigged,", "-8.00033541e+00, -3.35406373e-04], [ -5.60369104e+00, -3.69104343e-03], [ -3.05902274e-07, -1.50000003e+01], [ -3.35406373e-04,", "8.32018385e-01], [ 9.99999694e-01, 3.05902227e-07], [ 9.16827304e-01, 8.31726965e-02], [ 9.90048198e-01, 9.95180187e-03]]", "model2.weights) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_to_json(): model2 = BayesClassifier.from_json(model.to_json()) assert_true(isinstance(model2, BayesClassifier))", "-3.57980882e-01, -1.20093223e+00], [ -1.20735130e+00, -3.55230506e-01], [ -2.43174286e-01, -1.53310132e+00], [ -6.93147181e-01,", "-6.437751649736401 ) assert_almost_equal(hmm2.log_probability(list('H')), -0.6931471805599453 ) assert_almost_equal(hmm2.log_probability(list('T')), -0.6931471805599453 ) assert_almost_equal(hmm2.log_probability(list('HHHH')), -2.772588722239781", "0.27094373022369794) assert_almost_equal(probs[4][1], 0.24642188711704707) assert_almost_equal(probs[4][2], 0.48263438265925512) @with_setup(setup_hmm, teardown) def test_hmm_prediction(): predicts", "= BayesClassifier.from_json(model.to_json()) assert_true(isinstance(model2, BayesClassifier)) assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution)) assert_array_almost_equal(model.weights, model2.weights)", "-1.98758175e+00]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_nan_predict_log_proba(): y_hat = model.predict_log_proba(X_nan)", "= HiddenMarkovModel() hmm3.add_transition(hmm3.start, unrigged, 0.5) hmm3.add_transition(hmm3.start, rigged, 0.5) hmm3.add_transition(rigged, rigged,", "teardown) def test_bc_multivariate_mixed_predict_log_proba(): y_hat = model.predict_log_proba(X) y = [[ -5.03107596e-01,", "[ nan, 1.4, nan], [ 1.4, 2.6, nan], [ nan,", "y = [0, 0, 0, 1, 1, 1, 1, 0,", "-9.85292840e-02, -2.36626165e+00], [ -2.61764180e-01, -1.46833995e+00], [ -2.01640009e-03, -6.20744952e+00], [ -1.47371167e-01,", "-2.772588722239781 ) assert_almost_equal(hmm3.log_probability(list('H')), -0.43078291609245417) assert_almost_equal(hmm3.log_probability(list('T')), -1.0498221244986776) assert_almost_equal(hmm3.log_probability(list('HHHH')), -1.7231316643698167) assert_almost_equal(hmm3.log_probability(list('THHH')), -2.3421708727760397)", "from nose.tools import with_setup from nose.tools import assert_almost_equal from nose.tools", "d23]) global model model = BayesClassifier([d1, d2]) global X X", "= model.predict(X) y = [0, 0, 0, 1, 1, 0,", "0.17002137], [ 0.57204244, 0.42795756], [ 0.46425765, 0.53574235], [ 0.09743127, 0.90256873],", "9.99944551e-01], [ 
3.68423990e-03, 9.96315760e-01], [ 1.67981615e-01, 8.32018385e-01], [ 9.99999694e-01, 3.05902227e-07],", "0.5) hmm3.add_transition(rigged, unrigged, 0.5) hmm3.add_transition(unrigged, rigged, 0.5) hmm3.add_transition(unrigged, unrigged, 0.5)", "0.24642188711704707) assert_almost_equal(probs[4][2], 0.48263438265925512) @with_setup(setup_hmm, teardown) def test_hmm_prediction(): predicts = model.predict(np.array([list('H'),", "1) hmm2.bake() hmm3 = HiddenMarkovModel() hmm3.add_transition(hmm3.start, unrigged, 0.5) hmm3.add_transition(hmm3.start, rigged,", "assert_array_almost_equal(cov1, cov1_t) assert_array_almost_equal(mu2, mu2_t) assert_array_almost_equal(cov2, cov2_t) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_pickle():", "y_hat = model.predict(X) y = [0, 0, 1, 1, 1,", "0.346983479756311) assert_almost_equal(probs[4][0], 0.27094373022369794) assert_almost_equal(probs[4][1], 0.24642188711704707) assert_almost_equal(probs[4][2], 0.48263438265925512) @with_setup(setup_hmm, teardown) def", ") unrigged = State( DiscreteDistribution({ 'H': 0.5, 'T':0.5 }) )", "[ -3.05902274e-07, -1.50000003e+01], [ -3.35406373e-04, -8.00033541e+00], [ -6.11066022e-04, -7.40061107e+00]] assert_array_almost_equal(y,", "= [[ 0.60464873, 0.39535127], [ 0.82997863, 0.17002137], [ 0.57204244, 0.42795756],", "def test_bc_multivariate_gaussian_predict(): y_hat = model.predict(X) y = [0, 0, 1,", "bc2.log_probability(X) assert_array_almost_equal(logp1, logp2) def test_io_from_samples(): X = numpy.random.randn(100, 5) +", "'T':0.5 }) ) hmm1 = HiddenMarkovModel() hmm1.start = rigged hmm1.add_transition(rigged,", "[ 7.84134838e-01, 2.15865162e-01], [ 5.00000000e-01, 5.00000000e-01], [ 8.98102888e-05, 9.99910190e-01], [", "X3 = DataFrameGenerator(pandas.DataFrame(X)) logp1 = model.log_probability(X) logp2 = model.log_probability(X2) logp3", "0], numpy.eye(3) d1 = MultivariateGaussianDistribution(mu, cov) mu, cov = [2,", "9.95180187e-03]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_nan_predict_proba(): y_hat = model.predict_proba(X_nan)", "from nose.tools import assert_equal from nose.tools import assert_not_equal from nose.tools", "= model.predict(X_nan) y = [0, 1, 1, 0, 1, 1,", "model2.weights) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_robust_from_json(): model2 = from_json(model.to_json()) assert_true(isinstance(model2, BayesClassifier))", "assert_equal(model.d, 1) @with_setup(setup_hmm, teardown) def test_hmm_log_proba(): logs = model.predict_log_proba(np.array([list('H'), list('THHH'),", "= model.distributions[1].parameters[1] mu2_t = [2.925, 2.825, 2.625] cov2_t = [[0.75687499,", "1.7], [ 1.8, 2.2, 1.8], [ 1.2, 1.8, 1.5], [", "0.99798563, 0.00201437], [ 0.86297361, 0.13702639]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def", "0.5, 'T':0.5 }) ) hmm1 = HiddenMarkovModel() hmm1.start = rigged", "as np nan = numpy.nan def setup_multivariate_gaussian(): mu, cov =", "1, 1]) cov = numpy.eye(5) d1 = MultivariateGaussianDistribution(mu1, cov) d2", "y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict_log_proba_parallel(): y_hat = model.predict_log_proba(X, n_jobs=2) y", "test_bc_multivariate_mixed_nan_predict_proba(): y_hat = model.predict_proba(X_nan) y = [[ 6.99086440e-01, 3.00913560e-01], [", "y_hat = model.predict(X_nan) y = [0, 1, 1, 0, 1,", "4.2, 3.3, 3.7], [ 2.6, 3.6, 3.3], [ 3.1, 2.2,", 
"-6.93147181e-01], [ -9.80005545e+00, -5.54500620e-05], [ -5.60369104e+00, -3.69104343e-03], [ -1.78390074e+00, -1.83900741e-01],", "nose.tools import assert_less_equal from nose.tools import assert_raises from nose.tools import", "cov) d21 = ExponentialDistribution(5) d22 = LogNormalDistribution(0.2, 0.8) d23 =", "-3.05902274e-07, -1.50000003e+01], [ -3.35406373e-04, -8.00033541e+00], [ -6.11066022e-04, -7.40061107e+00]] assert_array_almost_equal(y, y_hat)", "[2, 2, 2], numpy.eye(3) d2 = MultivariateGaussianDistribution(mu, cov) global model", "list('THTHHHHHTHTH')])) assert_almost_equal(probs[0][0], 0.41025641025641024) assert_almost_equal(probs[0][1], 0.25641025641025639) assert_almost_equal(probs[0][2], 0.33333333333333331) assert_almost_equal(probs[1][0], 0.39230898163446098) assert_almost_equal(probs[1][1],", "pomegranate.io import DataFrameGenerator from nose.tools import with_setup from nose.tools import", "= BayesClassifier([d1, d2]) bc2.fit(data_generator) logp1 = bc1.log_probability(X) logp2 = bc2.log_probability(X)", "= BayesClassifier([d1, d2]) global X X = numpy.array([[ 0.3, 0.5,", "1.8, 0.3, 0.5], [ 0.7, 1.3, 0.1]]) global y y", "assert_almost_equal(hmm3.log_probability(list('THHH')), -2.3421708727760397) assert_almost_equal(hmm3.log_probability(list('TTTT')), -4.1992884979947105) assert_almost_equal(hmm3.log_probability(list('THTHTHTHTHTH')), -8.883630243546788) assert_almost_equal(hmm3.log_probability(list('THTHHHHHTHTH')), -7.645551826734343) assert_equal(model.d, 1)", "def teardown(): pass @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_initialization(): assert_equal(model.d, 3) assert_equal(model.n,", "def test_bc_multivariate_gaussian_robust_from_json(): model2 = from_json(model.to_json()) assert_true(isinstance(model2, BayesClassifier)) assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(isinstance(model2.distributions[1],", "nan, nan], [ nan, 3.6, 3.3], [ 3.1, nan, 1.7],", "model.distributions[1].parameters[1] mu2_t = [2.925, 2.825, 2.625] cov2_t = [[0.75687500, 0.23687499,", "-1.4007102236822906) assert_almost_equal(logs[4][2], -0.7284958836972919) @with_setup(setup_hmm, teardown) def test_hmm_proba(): probs = model.predict_proba(np.array([list('H'),", "BayesClassifier([d1, d2]) bc1.fit(X, y, weights) d1 = MultivariateGaussianDistribution(mu1, cov) d2", "bc2 = BayesClassifier([d1, d2]) bc2.fit(data_generator) logp1 = bc1.log_probability(X) logp2 =", "@with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict_proba_parallel(): y_hat = model.predict_proba(X, n_jobs=2) y =", "y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_nan_predict_proba(): y_hat = model.predict_proba(X_nan) y =", ") assert_almost_equal(hmm1.log_probability(list('HHHH')), -0.8925742052568388 ) assert_almost_equal(hmm1.log_probability(list('THHH')), -2.2788685663767296 ) assert_almost_equal(hmm1.log_probability(list('TTTT')), -6.437751649736401 )", "y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict_proba_parallel(): y_hat = model.predict_proba(X, n_jobs=2) y", "-3.55230506e-01], [ -2.43174286e-01, -1.53310132e+00], [ -6.93147181e-01, -6.93147181e-01], [ -9.31781101e+00, -8.98143220e-05],", "y_hat2) assert_array_almost_equal(y_hat1, y_hat3) @with_setup(setup_multivariate_gaussian, teardown) def test_io_predict_log_proba(): X2 = DataGenerator(X)", "= model.distributions[1].distributions[0] d22 = 
model.distributions[1].distributions[1] d23 = model.distributions[1].distributions[2] assert_array_almost_equal(mu1, mu1_t)", "5) + 0.5 weights = numpy.abs(numpy.random.randn(100)) y = numpy.random.randint(2, size=100)", "y y = [0, 0, 0, 1, 1, 1, 1,", "0.79007663743383105) assert_almost_equal(probs[2][2], 0.18969740064786292) assert_almost_equal(probs[3][0], 0.041989459861032523) assert_almost_equal(probs[3][1], 0.61102706038265642) assert_almost_equal(probs[3][2], 0.346983479756311) assert_almost_equal(probs[4][0],", "cov) bc2 = BayesClassifier([d1, d2]) bc2.fit(data_generator) logp1 = bc1.log_probability(X) logp2", "y = [[ -3.99533332e-02, -3.23995333e+00], [ -1.17110067e+00, -3.71100666e-01], [ -4.01814993e+00,", "-5.60369104e+00, -3.69104343e-03], [ -1.64000001e+01, -7.54345812e-08], [ -1.30000023e+01, -2.26032685e-06], [ -8.00033541e+00,", "-5.58542088e-01, -8.48731256e-01], [ -7.67315597e-01, -6.24101927e-01], [ -2.32860808e+00, -1.02510436e-01], [ -3.06641866e-03,", "model.log_probability(X3) assert_array_almost_equal(logp1, logp2) assert_array_almost_equal(logp1, logp3) @with_setup(setup_multivariate_gaussian, teardown) def test_io_predict(): X2", "mu1_t = [0.03333333, 0.28333333, 0.21666666] cov1_t = [[1.3088888, 0.9272222, 0.6227777],", "nan], [ nan, 3.6, 3.3], [ 3.1, nan, 1.7], [", "0] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_nan_predict(): y_hat = model.predict(X_nan)", "y_hat3) @with_setup(setup_multivariate_gaussian, teardown) def test_io_predict_proba(): X2 = DataGenerator(X) X3 =", "1.8], [ 1.2, 1.8, 1.5], [ nan, 0.3, 0.5], [", "cov2_t = [[0.75687499, 0.23687499, 0.4793750], [0.23687499, 0.40187499, 0.5318749], [0.47937500, 0.53187499,", "-2.43174286e-01, -1.53310132e+00], [ -6.93147181e-01, -6.93147181e-01], [ -9.31781101e+00, -8.98143220e-05], [ -6.29755079e-04,", "= model.distributions[1].distributions[1] d23 = model.distributions[1].distributions[2] assert_array_almost_equal(mu1, mu1_t) assert_array_almost_equal(cov1, cov1_t) assert_array_almost_equal(d21.parameters,", "model2 = BayesClassifier.from_json(model.to_json()) assert_true(isinstance(model2, BayesClassifier)) assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution)) assert_array_almost_equal(model.weights,", "with_setup from nose.tools import assert_almost_equal from nose.tools import assert_equal from", "6.90560453e-01]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict_proba_parallel(): y_hat = model.predict_proba(X,", "y_hat = model.predict(X, n_jobs=2) y = [0, 0, 1, 1,", "[ nan, 1.3, nan]]) def setup_hmm(): global model global hmm1", "9.99999925e-01], [ 2.26032430e-06, 9.99997740e-01], [ 3.35350130e-04, 9.99664650e-01], [ 3.68423990e-03, 9.96315760e-01],", "= [[0.75687499, 0.23687499, 0.4793750], [0.23687499, 0.40187499, 0.5318749], [0.47937500, 0.53187499, 0.7868750]]", "assert_almost_equal(probs[4][2], 0.48263438265925512) @with_setup(setup_hmm, teardown) def test_hmm_prediction(): predicts = model.predict(np.array([list('H'), list('THHH'),", "assert_array_almost_equal(y_hat1, y_hat3) @with_setup(setup_multivariate_gaussian, teardown) def test_io_predict_log_proba(): X2 = DataGenerator(X) X3", "0.25641025641025639) assert_almost_equal(probs[0][2], 0.33333333333333331) assert_almost_equal(probs[1][0], 0.39230898163446098) 
from __future__ import (division)

from pomegranate import *
from pomegranate.io import DataGenerator
from pomegranate.io import DataFrameGenerator

from nose.tools import with_setup
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_less_equal
from nose.tools import assert_raises
from nose.tools import assert_true
from numpy.testing import assert_array_almost_equal

import pandas
import random
import pickle
import numpy  # explicit import; the suite references both 'numpy' and the 'np' alias
import numpy as np

nan = numpy.nan


def setup_multivariate_gaussian():
    """Build a Bayes classifier over two unit-covariance Gaussians."""
    mu, cov = [0, 0, 0], numpy.eye(3)
    d1 = MultivariateGaussianDistribution(mu, cov)

    mu, cov = [2, 2, 2], numpy.eye(3)
    d2 = MultivariateGaussianDistribution(mu, cov)

    global model
    model = BayesClassifier([d1, d2])

    global X
    X = numpy.array([[ 0.3,  0.5,  0.1],
                     [ 0.8,  1.4,  0.5],
                     [ 1.4,  2.6,  1.8],
                     [ 4.2,  3.3,  3.7],
                     [ 2.6,  3.6,  3.3],
                     [ 3.1,  2.2,  1.7],
                     [ 1.8,  2.2,  1.8],
                     [-1.2, -1.8, -1.5],
                     [-1.8,  0.3,  0.5],
                     [ 0.7, -1.3, -0.1]])

    global y
    y = [0, 0, 0, 1, 1, 1, 1, 0, 0, 0]

    global X_nan
    X_nan = numpy.array([[ 0.3,  nan,  0.1],
                         [ nan,  1.4,  nan],
                         [ 1.4,  2.6,  nan],
                         [ nan,  nan,  nan],
                         [ nan,  3.6,  3.3],
                         [ 3.1,  nan,  1.7],
                         [ nan,  nan,  1.8],
                         [-1.2, -1.8, -1.5],
                         [ nan,  0.3,  0.5],
                         [ nan, -1.3,  nan]])


def setup_multivariate_mixed():
    """Gaussian class versus an independent Exponential/LogNormal/Poisson class."""
    mu, cov = [0, 0, 0], numpy.eye(3)
    d1 = MultivariateGaussianDistribution(mu, cov)

    d21 = ExponentialDistribution(5)
    d22 = LogNormalDistribution(0.2, 0.8)
    d23 = PoissonDistribution(3)
    d2 = IndependentComponentsDistribution([d21, d22, d23])

    global model
    model = BayesClassifier([d1, d2])

    global X
    X = numpy.array([[ 0.3,  0.5,  0.1],
                     [ 0.8,  1.4,  0.5],
                     [ 1.4,  2.6,  1.8],
                     [ 4.2,  3.3,  3.7],
                     [ 2.6,  3.6,  3.3],
                     [ 3.1,  2.2,  1.7],
                     [ 1.8,  2.2,  1.8],
                     [ 1.2,  1.8,  1.5],
                     [ 1.8,  0.3,  0.5],
                     [ 0.7,  1.3,  0.1]])

    global y
    y = [0, 0, 0, 1, 1, 1, 1, 0, 0, 0]

    global X_nan
    X_nan = numpy.array([[ 0.3,  nan,  0.1],
                         [ nan,  1.4,  nan],
                         [ 1.4,  2.6,  nan],
                         [ nan,  nan,  nan],
                         [ nan,  3.6,  3.3],
                         [ 3.1,  nan,  1.7],
                         [ nan,  nan,  1.8],
                         [ 1.2,  1.8,  1.5],
                         [ nan,  0.3,  0.5],
                         [ nan,  1.3,  nan]])


def setup_hmm():
    """Build a Bayes classifier over three coin-flip HMMs: rigged, fair, and mixed."""
    global model
    global hmm1
    global hmm2
    global hmm3

    rigged = State(DiscreteDistribution({'H': 0.8, 'T': 0.2}))
    unrigged = State(DiscreteDistribution({'H': 0.5, 'T': 0.5}))

    hmm1 = HiddenMarkovModel()
    hmm1.start = rigged
    hmm1.add_transition(rigged, rigged, 1)
    hmm1.bake()

    hmm2 = HiddenMarkovModel()
    hmm2.start = unrigged
    hmm2.add_transition(unrigged, unrigged, 1)
    hmm2.bake()

    hmm3 = HiddenMarkovModel()
    hmm3.add_transition(hmm3.start, unrigged, 0.5)
    hmm3.add_transition(hmm3.start, rigged, 0.5)
    hmm3.add_transition(rigged, rigged, 0.5)
    hmm3.add_transition(rigged, unrigged, 0.5)
    hmm3.add_transition(unrigged, rigged, 0.5)
    hmm3.add_transition(unrigged, unrigged, 0.5)
    hmm3.bake()

    model = BayesClassifier([hmm1, hmm2, hmm3])


def setup_multivariate():
    pass


def teardown():
    pass


@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_initialization():
    assert_equal(model.d, 3)
    assert_equal(model.n, 2)
    assert_equal(model.is_vl_, False)


@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_initialization():
    assert_equal(model.d, 3)
    assert_equal(model.n, 2)
    assert_equal(model.is_vl_, False)


@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_log_proba():
    y_hat = model.predict_log_proba(X)
    y = [[ -1.48842547e-02, -4.21488425e+00],
         [ -4.37487950e-01, -1.03748795e+00],
         [ -5.60369104e+00, -3.69104343e-03],
         [ -1.64000001e+01, -7.54345812e-08],
         [ -1.30000023e+01, -2.26032685e-06],
         [ -8.00033541e+00, -3.35406373e-04],
         [ -5.60369104e+00, -3.69104343e-03],
         [ -3.05902274e-07, -1.50000003e+01],
         [ -3.35406373e-04, -8.00033541e+00],
         [ -6.11066022e-04, -7.40061107e+00]]

    assert_array_almost_equal(y, y_hat)


@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_log_proba():
    y_hat = model.predict_log_proba(X)
    y = [[ -5.03107596e-01, -9.27980626e-01],
         [ -1.86355320e-01, -1.77183117e+00],
         [ -5.58542088e-01, -8.48731256e-01],
         [ -7.67315597e-01, -6.24101927e-01],
         [ -2.32860808e+00, -1.02510436e-01],
         [ -3.06641866e-03, -5.78877778e+00],
         [ -9.85292840e-02, -2.36626165e+00],
         [ -2.61764180e-01, -1.46833995e+00],
         [ -2.01640009e-03, -6.20744952e+00],
         [ -1.47371167e-01, -1.98758175e+00]]

    assert_array_almost_equal(y, y_hat)


@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_nan_predict_log_proba():
    y_hat = model.predict_log_proba(X_nan)
    # The first two rows are the logs of the corresponding rows asserted in
    # test_bc_multivariate_gaussian_nan_predict_proba below.
    y = [[ -3.99533334e-02, -3.23995333e+00],
         [ -1.17110067e+00, -3.71100669e-01],
         [ -4.01814993e+00, -1.81499279e-02],
         [ -6.93147181e-01, -6.93147181e-01],
         [ -9.80005545e+00, -5.54500620e-05],
         [ -5.60369104e+00, -3.69104343e-03],
         [ -1.78390074e+00, -1.83900741e-01],
         [ -3.05902274e-07, -1.50000003e+01],
         [ -8.68361522e-02, -2.48683615e+00],
         [ -1.00016521e-02, -4.61000165e+00]]

    assert_array_almost_equal(y, y_hat)


@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_nan_predict_log_proba():
    y_hat = model.predict_log_proba(X_nan)
    y = [[ -3.57980882e-01, -1.20093223e+00],
         [ -1.20735130e+00, -3.55230506e-01],
         [ -2.43174286e-01, -1.53310132e+00],
         [ -6.93147181e-01, -6.93147181e-01],
         [ -9.31781101e+00, -8.98143220e-05],
         [ -6.29755079e-04, -7.37049444e+00],
         [ -1.31307006e+00, -3.13332194e-01],
         [ -2.61764180e-01, -1.46833995e+00],
         [ -2.29725479e-01, -1.58353505e+00],
         [ -1.17299253e+00, -3.70251760e-01]]

    assert_array_almost_equal(y, y_hat)


@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_log_proba_parallel():
    y_hat = model.predict_log_proba(X, n_jobs=2)
    y = [[ -1.48842547e-02, -4.21488425e+00],
         [ -4.37487950e-01, -1.03748795e+00],
         [ -5.60369104e+00, -3.69104343e-03],
         [ -1.64000001e+01, -7.54345812e-08],
         [ -1.30000023e+01, -2.26032685e-06],
         [ -8.00033541e+00, -3.35406373e-04],
         [ -5.60369104e+00, -3.69104343e-03],
         [ -3.05902274e-07, -1.50000003e+01],
         [ -3.35406373e-04, -8.00033541e+00],
         [ -6.11066022e-04, -7.40061107e+00]]

    assert_array_almost_equal(y, y_hat)


@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_log_proba_parallel():
    y_hat = model.predict_log_proba(X, n_jobs=2)
    y = [[ -5.03107596e-01, -9.27980626e-01],
         [ -1.86355320e-01, -1.77183117e+00],
         [ -5.58542088e-01, -8.48731256e-01],
         [ -7.67315597e-01, -6.24101927e-01],
         [ -2.32860808e+00, -1.02510436e-01],
         [ -3.06641866e-03, -5.78877778e+00],
         [ -9.85292840e-02, -2.36626165e+00],
         [ -2.61764180e-01, -1.46833995e+00],
         [ -2.01640009e-03, -6.20744952e+00],
         [ -1.47371167e-01, -1.98758175e+00]]

    assert_array_almost_equal(y, y_hat)


@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_proba():
    y_hat = model.predict_proba(X)
    y = [[ 9.85225968e-01, 1.47740317e-02],
         [ 6.45656306e-01, 3.54343694e-01],
         [ 3.68423990e-03, 9.96315760e-01],
         [ 7.54345778e-08, 9.99999925e-01],
         [ 2.26032430e-06, 9.99997740e-01],
         [ 3.35350130e-04, 9.99664650e-01],
         [ 3.68423990e-03, 9.96315760e-01],
         [ 9.99999694e-01, 3.05902227e-07],
         [ 9.99664650e-01, 3.35350130e-04],
         [ 9.99389121e-01, 6.10879359e-04]]

    assert_array_almost_equal(y, y_hat)


@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_proba():
    y_hat = model.predict_proba(X)
    y = [[ 0.60464873, 0.39535127],
         [ 0.82997863, 0.17002137],
         [ 0.57204244, 0.42795756],
         [ 0.46425765, 0.53574235],
         [ 0.09743127, 0.90256873],
         [ 0.99693828, 0.00306172],
         [ 0.90616916, 0.09383084],
         [ 0.76969251, 0.23030749],
         [ 0.99798563, 0.00201437],
         [ 0.86297361, 0.13702639]]

    assert_array_almost_equal(y, y_hat)


@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_nan_predict_proba():
    y_hat = model.predict_proba(X_nan)
    y = [[ 9.60834277e-01, 3.91657228e-02],
         [ 3.10025519e-01, 6.89974481e-01],
         [ 1.79862100e-02, 9.82013790e-01],
         [ 5.00000000e-01, 5.00000000e-01],
         [ 5.54485247e-05, 9.99944551e-01],
         [ 3.68423990e-03, 9.96315760e-01],
         [ 1.67981615e-01, 8.32018385e-01],
         [ 9.99999694e-01, 3.05902227e-07],
         [ 9.16827304e-01, 8.31726965e-02],
         [ 9.90048198e-01, 9.95180187e-03]]

    assert_array_almost_equal(y, y_hat)


@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_nan_predict_proba():
    y_hat = model.predict_proba(X_nan)
    y = [[ 6.99086440e-01, 3.00913560e-01],
         [ 2.98988163e-01, 7.01011837e-01],
         [ 7.84134838e-01, 2.15865162e-01],
         [ 5.00000000e-01, 5.00000000e-01],
         [ 8.98102888e-05, 9.99910190e-01],
         [ 9.99370443e-01, 6.29556825e-04],
         [ 2.68992964e-01, 7.31007036e-01],
         [ 7.69692511e-01, 2.30307489e-01],
         [ 7.94751748e-01, 2.05248252e-01],
         [ 3.09439547e-01, 6.90560453e-01]]

    assert_array_almost_equal(y, y_hat)


@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_proba_parallel():
    y_hat = model.predict_proba(X, n_jobs=2)
    y = [[ 9.85225968e-01, 1.47740317e-02],
         [ 6.45656306e-01, 3.54343694e-01],
         [ 3.68423990e-03, 9.96315760e-01],
         [ 7.54345778e-08, 9.99999925e-01],
         [ 2.26032430e-06, 9.99997740e-01],
         [ 3.35350130e-04, 9.99664650e-01],
         [ 3.68423990e-03, 9.96315760e-01],
         [ 9.99999694e-01, 3.05902227e-07],
         [ 9.99664650e-01, 3.35350130e-04],
         [ 9.99389121e-01, 6.10879359e-04]]

    assert_array_almost_equal(y, y_hat)


@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_proba_parallel():
    y_hat = model.predict_proba(X, n_jobs=2)
    y = [[ 0.60464873, 0.39535127],
         [ 0.82997863, 0.17002137],
         [ 0.57204244, 0.42795756],
         [ 0.46425765, 0.53574235],
         [ 0.09743127, 0.90256873],
         [ 0.99693828, 0.00306172],
         [ 0.90616916, 0.09383084],
         [ 0.76969251, 0.23030749],
         [ 0.99798563, 0.00201437],
         [ 0.86297361, 0.13702639]]

    assert_array_almost_equal(y, y_hat)


@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict():
    y_hat = model.predict(X)
    y = [0, 0, 1, 1, 1, 1, 1, 0, 0, 0]

    assert_array_almost_equal(y, y_hat)


@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict():
    y_hat = model.predict(X)
    y = [0, 0, 0, 1, 1, 0, 0, 0, 0, 0]

    assert_array_almost_equal(y, y_hat)


@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_nan_predict():
    y_hat = model.predict(X_nan)
    y = [0, 1, 1, 0, 1, 1, 1, 0, 0, 0]

    assert_array_almost_equal(y, y_hat)


@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_nan_predict():
    y_hat = model.predict(X_nan)
    # Argmax of the rows asserted in test_bc_multivariate_mixed_nan_predict_proba.
    y = [0, 1, 0, 0, 1, 0, 1, 0, 0, 1]

    assert_array_almost_equal(y, y_hat)


@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_parallel():
    y_hat = model.predict(X, n_jobs=2)
    y = [0, 0, 1, 1, 1, 1, 1, 0, 0, 0]

    assert_array_almost_equal(y, y_hat)


@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_parallel():
    y_hat = model.predict(X, n_jobs=2)
    y = [0, 0, 0, 1, 1, 0, 0, 0, 0, 0]

    assert_array_almost_equal(y, y_hat)


@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_fit_parallel():
    model.fit(X, y, n_jobs=2)

    mu1 = model.distributions[0].parameters[0]
    cov1 = model.distributions[0].parameters[1]
    mu1_t = [0.03333333, 0.28333333, 0.21666666]
    cov1_t = [[1.3088888, 0.9272222, 0.6227777],
              [0.9272222, 2.2513888, 1.3402777],
              [0.6227777, 1.3402777, 0.9547222]]

    mu2 = model.distributions[1].parameters[0]
    cov2 = model.distributions[1].parameters[1]
    mu2_t = [2.925, 2.825, 2.625]
    cov2_t = [[0.75687500, 0.23687499, 0.4793750],
              [0.23687499, 0.40187499, 0.5318749],
              [0.47937500, 0.53187499, 0.7868750]]

    assert_array_almost_equal(mu1, mu1_t)
    assert_array_almost_equal(cov1, cov1_t)
    assert_array_almost_equal(mu2, mu2_t)
    assert_array_almost_equal(cov2, cov2_t)


@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_fit_parallel():
    model.fit(X, y, n_jobs=2)

    mu1 = model.distributions[0].parameters[0]
    cov1 = model.distributions[0].parameters[1]
    mu1_t = [1.033333, 1.3166667, 0.75]
    cov1_t = [[0.242222, 0.0594444, 0.178333],
              [0.059444, 0.5980555, 0.414166],
              [0.178333, 0.4141666, 0.439166]]

    d21 = model.distributions[1].distributions[0]
    d22 = model.distributions[1].distributions[1]
    d23 = model.distributions[1].distributions[2]

    assert_array_almost_equal(mu1, mu1_t)
    assert_array_almost_equal(cov1, cov1_t)
    assert_array_almost_equal(d21.parameters, [0.34188034])
    assert_array_almost_equal(d22.parameters, [1.01294275, 0.22658346])
    assert_array_almost_equal(d23.parameters, [2.625])


@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_from_samples():
    model = BayesClassifier.from_samples(MultivariateGaussianDistribution, X, y)

    mu1 = model.distributions[0].parameters[0]
    cov1 = model.distributions[0].parameters[1]
    mu1_t = [0.03333333, 0.2833333, 0.21666666]
    cov1_t = [[1.308888888, 0.9272222222, 0.6227777777],
              [0.927222222, 2.251388888, 1.340277777],
              [0.622777777, 1.340277777, 0.9547222222]]

    mu2 = model.distributions[1].parameters[0]
    cov2 = model.distributions[1].parameters[1]
    mu2_t = [2.925, 2.825, 2.625]
    cov2_t = [[0.75687500, 0.23687499, 0.47937500],
              [0.23687499, 0.40187499, 0.53187499],
              [0.47937500, 0.53187499, 0.78687500]]

    assert_array_almost_equal(mu1, mu1_t)
    assert_array_almost_equal(cov1, cov1_t)
    assert_array_almost_equal(mu2, mu2_t)
    assert_array_almost_equal(cov2, cov2_t)


@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_pickle():
    model2 = pickle.loads(pickle.dumps(model))
    assert_true(isinstance(model2, BayesClassifier))
    assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
    assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution))
    assert_array_almost_equal(model.weights, model2.weights)


@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_pickle():
    model2 = pickle.loads(pickle.dumps(model))
    assert_true(isinstance(model2, BayesClassifier))
    assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
    assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution))
    assert_array_almost_equal(model.weights, model2.weights)


@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_to_json():
    model2 = BayesClassifier.from_json(model.to_json())
    assert_true(isinstance(model2, BayesClassifier))
    assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
    assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution))
    assert_array_almost_equal(model.weights, model2.weights)


@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_to_json():
    model2 = BayesClassifier.from_json(model.to_json())
    assert_true(isinstance(model2, BayesClassifier))
    assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
    assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution))
    assert_array_almost_equal(model.weights, model2.weights)


@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_robust_from_json():
    model2 = from_json(model.to_json())
    assert_true(isinstance(model2, BayesClassifier))
    assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
    assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution))
    assert_array_almost_equal(model.weights, model2.weights)


@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_robust_from_json():
    model2 = from_json(model.to_json())
    assert_true(isinstance(model2, BayesClassifier))
    assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
    assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution))
    assert_array_almost_equal(model.weights, model2.weights)


@with_setup(setup_hmm, teardown)
def test_model():
    assert_almost_equal(hmm1.log_probability(list('H')), -0.2231435513142097)
    assert_almost_equal(hmm1.log_probability(list('T')), -1.6094379124341003)
    assert_almost_equal(hmm1.log_probability(list('HHHH')), -0.8925742052568388)
    assert_almost_equal(hmm1.log_probability(list('THHH')), -2.2788685663767296)
    assert_almost_equal(hmm1.log_probability(list('TTTT')), -6.437751649736401)

    assert_almost_equal(hmm2.log_probability(list('H')), -0.6931471805599453)
    assert_almost_equal(hmm2.log_probability(list('T')), -0.6931471805599453)
    assert_almost_equal(hmm2.log_probability(list('HHHH')), -2.772588722239781)
    assert_almost_equal(hmm2.log_probability(list('THHH')), -2.772588722239781)
    assert_almost_equal(hmm2.log_probability(list('TTTT')), -2.772588722239781)

    assert_almost_equal(hmm3.log_probability(list('H')), -0.43078291609245417)
    assert_almost_equal(hmm3.log_probability(list('T')), -1.0498221244986776)
    assert_almost_equal(hmm3.log_probability(list('HHHH')), -1.7231316643698167)
    assert_almost_equal(hmm3.log_probability(list('THHH')), -2.3421708727760397)
    assert_almost_equal(hmm3.log_probability(list('TTTT')), -4.1992884979947105)
    assert_almost_equal(hmm3.log_probability(list('THTHTHTHTHTH')), -8.883630243546788)
    assert_almost_equal(hmm3.log_probability(list('THTHHHHHTHTH')), -7.645551826734343)

    assert_equal(model.d, 1)


@with_setup(setup_hmm, teardown)
def test_hmm_log_proba():
    logs = model.predict_log_proba(np.array([list('H'), list('THHH'), list('TTTT'),
        list('THTHTHTHTHTH'), list('THTHHHHHTHTH')]))

    assert_almost_equal(logs[0][0], -0.89097292388986515)
    assert_almost_equal(logs[0][1], -1.3609765531356006)
    assert_almost_equal(logs[0][2], -1.0986122886681096)

    assert_almost_equal(logs[1][0], -0.93570553121744293)
    assert_almost_equal(logs[1][1], -1.429425687080494)
    assert_almost_equal(logs[1][2], -0.9990078376167526)

    assert_almost_equal(logs[2][0], -3.9007882563128864)
    assert_almost_equal(logs[2][1], -0.23562532881626597)
    assert_almost_equal(logs[2][2], -1.6623251045711958)

    assert_almost_equal(logs[3][0], -3.1703366478831185)
    assert_almost_equal(logs[3][1], -0.49261403211260379)
    assert_almost_equal(logs[3][2], -1.058478108940049)

    assert_almost_equal(logs[4][0], -1.3058441172130273)
    assert_almost_equal(logs[4][1], -1.4007102236822906)
    assert_almost_equal(logs[4][2], -0.7284958836972919)


@with_setup(setup_hmm, teardown)
def test_hmm_proba():
    probs = model.predict_proba(np.array([list('H'), list('THHH'), list('TTTT'),
        list('THTHTHTHTHTH'), list('THTHHHHHTHTH')]))

    assert_almost_equal(probs[0][0], 0.41025641025641024)
    assert_almost_equal(probs[0][1], 0.25641025641025639)
    assert_almost_equal(probs[0][2], 0.33333333333333331)

    assert_almost_equal(probs[1][0], 0.39230898163446098)
    assert_almost_equal(probs[1][1], 0.23944639992337707)
    assert_almost_equal(probs[1][2], 0.36824461844216183)

    assert_almost_equal(probs[2][0], 0.020225961918306088)
    assert_almost_equal(probs[2][1], 0.79007663743383105)
    assert_almost_equal(probs[2][2], 0.18969740064786292)

    assert_almost_equal(probs[3][0], 0.041989459861032523)
    assert_almost_equal(probs[3][1], 0.61102706038265642)
    assert_almost_equal(probs[3][2], 0.346983479756311)

    assert_almost_equal(probs[4][0], 0.27094373022369794)
    assert_almost_equal(probs[4][1], 0.24642188711704707)
    assert_almost_equal(probs[4][2], 0.48263438265925512)


@with_setup(setup_hmm, teardown)
def test_hmm_prediction():
    predicts = model.predict(np.array([list('H'), list('THHH'), list('TTTT'),
        list('THTHTHTHTHTH'), list('THTHHHHHTHTH')]))

    assert_equal(predicts[0], 0)
    assert_equal(predicts[1], 0)
    assert_equal(predicts[2], 1)
    assert_equal(predicts[3], 1)
    assert_equal(predicts[4], 2)


@with_setup(setup_multivariate_gaussian, teardown)
def test_io_log_probability():
    X2 = DataGenerator(X)
    X3 = DataFrameGenerator(pandas.DataFrame(X))

    logp1 = model.log_probability(X)
    logp2 = model.log_probability(X2)
    logp3 = model.log_probability(X3)

    assert_array_almost_equal(logp1, logp2)
    assert_array_almost_equal(logp1, logp3)


@with_setup(setup_multivariate_gaussian, teardown)
def test_io_predict():
    X2 = DataGenerator(X)
    X3 = DataFrameGenerator(pandas.DataFrame(X))

    y_hat1 = model.predict(X)
    y_hat2 = model.predict(X2)
    y_hat3 = model.predict(X3)

    assert_array_almost_equal(y_hat1, y_hat2)
    assert_array_almost_equal(y_hat1, y_hat3)


@with_setup(setup_multivariate_gaussian, teardown)
def test_io_predict_proba():
    X2 = DataGenerator(X)
    X3 = DataFrameGenerator(pandas.DataFrame(X))

    y_hat1 = model.predict_proba(X)
    y_hat2 = model.predict_proba(X2)
    y_hat3 = model.predict_proba(X3)

    assert_array_almost_equal(y_hat1, y_hat2)
    assert_array_almost_equal(y_hat1, y_hat3)


@with_setup(setup_multivariate_gaussian, teardown)
def test_io_predict_log_proba():
    X2 = DataGenerator(X)
    X3 = DataFrameGenerator(pandas.DataFrame(X))

    y_hat1 = model.predict_log_proba(X)
    y_hat2 = model.predict_log_proba(X2)
    y_hat3 = model.predict_log_proba(X3)

    assert_array_almost_equal(y_hat1, y_hat2)
    assert_array_almost_equal(y_hat1, y_hat3)


def test_io_fit():
    X = numpy.random.randn(100, 5) + 0.5
    weights = numpy.abs(numpy.random.randn(100))
    y = numpy.random.randint(2, size=100)
    data_generator = DataGenerator(X, weights, y)

    mu1 = numpy.array([0, 0, 0, 0, 0])
    mu2 = numpy.array([1, 1, 1, 1, 1])
    cov = numpy.eye(5)

    d1 = MultivariateGaussianDistribution(mu1, cov)
    d2 = MultivariateGaussianDistribution(mu2, cov)
    bc1 = BayesClassifier([d1, d2])
    bc1.fit(X, y, weights)

    d1 = MultivariateGaussianDistribution(mu1, cov)
    d2 = MultivariateGaussianDistribution(mu2, cov)
    bc2 = BayesClassifier([d1, d2])
    bc2.fit(data_generator)

    logp1 = bc1.log_probability(X)
    logp2 = bc2.log_probability(X)

    assert_array_almost_equal(logp1, logp2)


def test_io_from_samples():
    X = numpy.random.randn(100, 5) + 0.5
    weights = numpy.abs(numpy.random.randn(100))
    y = numpy.random.randint(2, size=100)
    data_generator = DataGenerator(X, weights, y)

    d = MultivariateGaussianDistribution

    bc1 = BayesClassifier.from_samples(d, X=X, y=y, weights=weights)
    bc2 = BayesClassifier.from_samples(d, X=data_generator)

    logp1 = bc1.log_probability(X)
    logp2 = bc2.log_probability(X)

    assert_array_almost_equal(logp1, logp2)
-1.47371167e-01, -1.98758175e+00]]", "def setup_multivariate_gaussian(): mu, cov = [0, 0, 0], numpy.eye(3) d1", "1, 1, 0, 0, 0, 0, 0] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian,", "MultivariateGaussianDistribution)) assert_array_almost_equal(model.weights, model2.weights) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_robust_from_json(): model2 = from_json(model.to_json())", "0.5], [ nan, 1.3, nan]]) def setup_hmm(): global model global", "assert_almost_equal(hmm3.log_probability(list('THTHHHHHTHTH')), -7.645551826734343) assert_equal(model.d, 1) @with_setup(setup_hmm, teardown) def test_hmm_log_proba(): logs =", "= model.predict_proba(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')])) assert_almost_equal(probs[0][0], 0.41025641025641024) assert_almost_equal(probs[0][1], 0.25641025641025639)", "nan, 1.8], [-1.2, -1.8, -1.5], [ nan, 0.3, 0.5], [", "from __future__ import (division) from pomegranate import * from pomegranate.io", "0.60464873, 0.39535127], [ 0.82997863, 0.17002137], [ 0.57204244, 0.42795756], [ 0.46425765,", "= BayesClassifier.from_json(model.to_json()) assert_true(isinstance(model2, BayesClassifier)) assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution)) assert_array_almost_equal(model.weights, model2.weights)", "9.99999694e-01, 3.05902227e-07], [ 9.16827304e-01, 8.31726965e-02], [ 9.90048198e-01, 9.95180187e-03]] assert_array_almost_equal(y, y_hat)", "0.90256873], [ 0.99693828, 0.00306172], [ 0.90616916, 0.09383084], [ 0.76969251, 0.23030749],", "@with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_initialization(): assert_equal(model.d, 3) assert_equal(model.n, 2) assert_equal(model.is_vl_, False)", "[ -1.47371167e-01, -1.98758175e+00]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict_proba(): y_hat", "3.1, nan, 1.7], [ nan, nan, 1.8], [-1.2, -1.8, -1.5],", "n_jobs=2) mu1 = model.distributions[0].parameters[0] cov1 = model.distributions[0].parameters[1] mu1_t = [0.03333333,", "-8.98143220e-05], [ -6.29755079e-04, -7.37049444e+00], [ -1.31307006e+00, -3.13332194e-01], [ -2.61764180e-01, -1.46833995e+00],", "[ 9.99370443e-01, 6.29556825e-04], [ 2.68992964e-01, 7.31007036e-01], [ 7.69692511e-01, 2.30307489e-01], [", "2.625] cov2_t = [[0.75687500, 0.23687499, 0.47937500], [0.23687499, 0.40187499, 0.53187499], [0.47937500,", "[ 3.68423990e-03, 9.96315760e-01], [ 1.67981615e-01, 8.32018385e-01], [ 9.99999694e-01, 3.05902227e-07], [", "-1.31307006e+00, -3.13332194e-01], [ -2.61764180e-01, -1.46833995e+00], [ -2.29725479e-01, -1.58353505e+00], [ -1.17299253e+00,", "DataGenerator from pomegranate.io import DataFrameGenerator from nose.tools import with_setup from", "unrigged, 0.5) hmm3.bake() model = BayesClassifier([hmm1, hmm2, hmm3]) def setup_multivariate():", "0.178333], [0.059444, 0.5980555, 0.414166], [0.178333, 0.4141666, 0.439166]] d21 = model.distributions[1].distributions[0]", "assert_array_almost_equal(y_hat1, y_hat2) assert_array_almost_equal(y_hat1, y_hat3) def test_io_fit(): X = numpy.random.randn(100, 5)", "1, 1, 1, 1, 0, 0, 0] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed,", "= model.predict(X) y = [0, 0, 1, 1, 1, 1,", "import (division) from pomegranate import * from pomegranate.io import DataGenerator", "def 
test_io_from_samples(): X = numpy.random.randn(100, 5) + 0.5 weights =", "0, 0, 1, 1, 0, 0, 0, 0, 0] assert_array_almost_equal(y,", "d23 = model.distributions[1].distributions[2] assert_array_almost_equal(mu1, mu1_t) assert_array_almost_equal(cov1, cov1_t) assert_array_almost_equal(d21.parameters, [0.34188034]) assert_array_almost_equal(d22.parameters,", "0, 0, 0, 0] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_nan_predict():", "0.39230898163446098) assert_almost_equal(probs[1][1], 0.23944639992337707) assert_almost_equal(probs[1][2], 0.36824461844216183) assert_almost_equal(probs[2][0], 0.020225961918306088) assert_almost_equal(probs[2][1], 0.79007663743383105) assert_almost_equal(probs[2][2],", "-6.93147181e-01, -6.93147181e-01], [ -9.31781101e+00, -8.98143220e-05], [ -6.29755079e-04, -7.37049444e+00], [ -1.31307006e+00,", "1, 0, 0, 0] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_nan_predict():", "@with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict_proba(): y_hat = model.predict_proba(X) y = [[", "test_bc_multivariate_gaussian_predict_proba_parallel(): y_hat = model.predict_proba(X, n_jobs=2) y = [[ 9.85225968e-01, 1.47740317e-02],", "[ -5.60369104e+00, -3.69104343e-03], [ -1.64000001e+01, -7.54345812e-08], [ -1.30000023e+01, -2.26032685e-06], [", "[ -2.61764180e-01, -1.46833995e+00], [ -2.29725479e-01, -1.58353505e+00], [ -1.17299253e+00, -3.70251760e-01]] assert_array_almost_equal(y,", "[ 9.90048198e-01, 9.95180187e-03]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_nan_predict_proba(): y_hat", "0, 0] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict_parallel(): y_hat =", "= [0.03333333, 0.28333333, 0.21666666] cov1_t = [[1.3088888, 0.9272222, 0.6227777], [0.9272222,", "= model.predict_proba(X2) y_hat3 = model.predict_proba(X3) assert_array_almost_equal(y_hat1, y_hat2) assert_array_almost_equal(y_hat1, y_hat3) @with_setup(setup_multivariate_gaussian,", "3.05902227e-07], [ 9.16827304e-01, 8.31726965e-02], [ 9.90048198e-01, 9.95180187e-03]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed,", "assert_true(isinstance(model2, BayesClassifier)) assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution)) assert_array_almost_equal(model.weights, model2.weights) @with_setup(setup_multivariate_gaussian, teardown)", "[ nan, 3.6, 3.3], [ 3.1, nan, 1.7], [ nan,", "d2 = MultivariateGaussianDistribution(mu2, cov) bc1 = BayesClassifier([d1, d2]) bc1.fit(X, y,", "3.68423990e-03, 9.96315760e-01], [ 1.67981615e-01, 8.32018385e-01], [ 9.99999694e-01, 3.05902227e-07], [ 9.16827304e-01,", "[ 1.67981615e-01, 8.32018385e-01], [ 9.99999694e-01, 3.05902227e-07], [ 9.16827304e-01, 8.31726965e-02], [", "@with_setup(setup_hmm, teardown) def test_hmm_log_proba(): logs = model.predict_log_proba(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'),", "assert_almost_equal(probs[0][2], 0.33333333333333331) assert_almost_equal(probs[1][0], 0.39230898163446098) assert_almost_equal(probs[1][1], 0.23944639992337707) assert_almost_equal(probs[1][2], 0.36824461844216183) assert_almost_equal(probs[2][0], 0.020225961918306088)", "-8.00033541e+00], [ 
-6.11066022e-04, -7.40061107e+00]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_predict_log_proba():", "MultivariateGaussianDistribution)) assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution)) assert_array_almost_equal(model.weights, model2.weights) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_pickle(): model2", "mu1 = model.distributions[0].parameters[0] cov1 = model.distributions[0].parameters[1] mu1_t = [0.03333333, 0.28333333,", "[ -9.85292840e-02, -2.36626165e+00], [ -2.61764180e-01, -1.46833995e+00], [ -2.01640009e-03, -6.20744952e+00], [", "0, 0, 1] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict_parallel(): y_hat", "assert_equal(predicts[0], 0) assert_equal(predicts[1], 0) assert_equal(predicts[2], 1) assert_equal(predicts[3], 1) assert_equal(predicts[4], 2)", "0.5318749], [0.47937500, 0.53187499, 0.7868750]] assert_array_almost_equal(mu1, mu1_t) assert_array_almost_equal(cov1, cov1_t) assert_array_almost_equal(mu2, mu2_t)", "-1.0498221244986776) assert_almost_equal(hmm3.log_probability(list('HHHH')), -1.7231316643698167) assert_almost_equal(hmm3.log_probability(list('THHH')), -2.3421708727760397) assert_almost_equal(hmm3.log_probability(list('TTTT')), -4.1992884979947105) assert_almost_equal(hmm3.log_probability(list('THTHTHTHTHTH')), -8.883630243546788) assert_almost_equal(hmm3.log_probability(list('THTHHHHHTHTH')),", "9.99370443e-01, 6.29556825e-04], [ 2.68992964e-01, 7.31007036e-01], [ 7.69692511e-01, 2.30307489e-01], [ 7.94751748e-01,", "9.60834277e-01, 3.91657228e-02], [ 3.10025519e-01, 6.89974481e-01], [ 1.79862100e-02, 9.82013790e-01], [ 5.00000000e-01,", "1.4, nan], [ 1.4, 2.6, nan], [ nan, nan, nan],", "-9.27980626e-01], [ -1.86355320e-01, -1.77183117e+00], [ -5.58542088e-01, -8.48731256e-01], [ -7.67315597e-01, -6.24101927e-01],", "assert_array_almost_equal(cov2, cov2_t) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_pickle(): model2 = pickle.loads(pickle.dumps(model)) assert_true(isinstance(model2,", "= model.distributions[0].parameters[0] cov1 = model.distributions[0].parameters[1] mu1_t = [0.03333333, 0.28333333, 0.21666666]", "teardown) def test_io_predict(): X2 = DataGenerator(X) X3 = DataFrameGenerator(pandas.DataFrame(X)) y_hat1", "[ 0.99798563, 0.00201437], [ 0.86297361, 0.13702639]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown)", "global model model = BayesClassifier([d1, d2]) global X X =", "size=100) data_generator = DataGenerator(X, weights, y) d = MultivariateGaussianDistribution bc1", "model.predict_proba(X, n_jobs=2) y = [[ 9.85225968e-01, 1.47740317e-02], [ 6.45656306e-01, 3.54343694e-01],", "logp2) def test_io_from_samples(): X = numpy.random.randn(100, 5) + 0.5 weights", "0.8, 'T': 0.2 }) ) unrigged = State( DiscreteDistribution({ 'H':", "test_io_predict(): X2 = DataGenerator(X) X3 = DataFrameGenerator(pandas.DataFrame(X)) y_hat1 = model.predict(X)", "random import pickle import numpy as np nan = numpy.nan", "6.29556825e-04], [ 2.68992964e-01, 7.31007036e-01], [ 7.69692511e-01, 2.30307489e-01], [ 7.94751748e-01, 2.05248252e-01],", "model.predict(X3) assert_array_almost_equal(y_hat1, y_hat2) assert_array_almost_equal(y_hat1, y_hat3) @with_setup(setup_multivariate_gaussian, teardown) def test_io_predict_proba(): X2", "y_hat3) def test_io_fit(): X = 
numpy.random.randn(100, 5) + 0.5 weights", "list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')])) assert_almost_equal(logs[0][0], -0.89097292388986515) assert_almost_equal(logs[0][1], -1.3609765531356006) assert_almost_equal(logs[0][2], -1.0986122886681096) assert_almost_equal(logs[1][0],", "-0.9990078376167526) assert_almost_equal(logs[2][0], -3.9007882563128864) assert_almost_equal(logs[2][1], -0.23562532881626597) assert_almost_equal(logs[2][2], -1.6623251045711958) assert_almost_equal(logs[3][0], -3.1703366478831185) assert_almost_equal(logs[3][1],", "-7.67315597e-01, -6.24101927e-01], [ -2.32860808e+00, -1.02510436e-01], [ -3.06641866e-03, -5.78877778e+00], [ -9.85292840e-02,", "= model.predict_proba(X, n_jobs=2) y = [[ 9.85225968e-01, 1.47740317e-02], [ 6.45656306e-01,", "[ 4.2, 3.3, 3.7], [ 2.6, 3.6, 3.3], [ 3.1,", "teardown) def test_model(): assert_almost_equal(hmm1.log_probability(list('H')), -0.2231435513142097 ) assert_almost_equal(hmm1.log_probability(list('T')), -1.6094379124341003 ) assert_almost_equal(hmm1.log_probability(list('HHHH')),", "test_bc_multivariate_mixed_predict_proba(): y_hat = model.predict_proba(X) y = [[ 0.60464873, 0.39535127], [", "= DataFrameGenerator(pandas.DataFrame(X)) y_hat1 = model.predict(X) y_hat2 = model.predict(X2) y_hat3 =", "[ -1.30000023e+01, -2.26032685e-06], [ -8.00033541e+00, -3.35406373e-04], [ -5.60369104e+00, -3.69104343e-03], [", "= model.predict(X_nan) y = [0, 1, 0, 0, 1, 0,", "0.6227777], [0.9272222, 2.2513888, 1.3402777], [0.6227777, 1.3402777, 0.9547222]] mu2 = model.distributions[1].parameters[0]", "0.47937500], [0.23687499, 0.40187499, 0.53187499], [0.47937500, 0.53187499, 0.78687500]] assert_array_almost_equal(mu1, mu1_t) assert_array_almost_equal(cov1,", "assert_almost_equal(logs[0][2], -1.0986122886681096) assert_almost_equal(logs[1][0], -0.93570553121744293) assert_almost_equal(logs[1][1], -1.429425687080494) assert_almost_equal(logs[1][2], -0.9990078376167526) assert_almost_equal(logs[2][0], -3.9007882563128864)", "0.76969251, 0.23030749], [ 0.99798563, 0.00201437], [ 0.86297361, 0.13702639]] assert_array_almost_equal(y, y_hat)", "numpy.eye(3) d1 = MultivariateGaussianDistribution(mu, cov) d21 = ExponentialDistribution(5) d22 =", "test_bc_multivariate_mixed_predict(): y_hat = model.predict(X) y = [0, 0, 0, 1,", "[0, 0, 0, 1, 1, 1, 1, 0, 0, 0]", "cov) bc1 = BayesClassifier([d1, d2]) bc1.fit(X, y, weights) d1 =", "0.40187499, 0.5318749], [0.47937500, 0.53187499, 0.7868750]] assert_array_almost_equal(mu1, mu1_t) assert_array_almost_equal(cov1, cov1_t) assert_array_almost_equal(mu2,", "assert_array_almost_equal(y_hat1, y_hat3) def test_io_fit(): X = numpy.random.randn(100, 5) + 0.5", "1.8], [ 4.2, 3.3, 3.7], [ 2.6, 3.6, 3.3], [", "0, 1, 0, 1, 0, 0, 1] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian,", "test_bc_multivariate_gaussian_fit_parallel(): model.fit(X, y, n_jobs=2) mu1 = model.distributions[0].parameters[0] cov1 = model.distributions[0].parameters[1]", "[[ -3.57980882e-01, -1.20093223e+00], [ -1.20735130e+00, -3.55230506e-01], [ -2.43174286e-01, -1.53310132e+00], [", "3.7], [ 2.6, 3.6, 3.3], [ 3.1, 2.2, 1.7], [", "hmm2.start = unrigged hmm2.add_transition(unrigged, unrigged, 1) hmm2.bake() hmm3 = HiddenMarkovModel()", "1.4, 0.5], [ 1.4, 2.6, 1.8], [ 4.2, 3.3, 3.7],", "def test_io_predict(): X2 = DataGenerator(X) X3 = DataFrameGenerator(pandas.DataFrame(X)) y_hat1 =", "assert_almost_equal(logs[2][2], -1.6623251045711958) assert_almost_equal(logs[3][0], -3.1703366478831185) 
assert_almost_equal(logs[3][1], -0.49261403211260379) assert_almost_equal(logs[3][2], -1.058478108940049) assert_almost_equal(logs[4][0], -1.3058441172130273)", "3.09439547e-01, 6.90560453e-01]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_predict_proba_parallel(): y_hat =", "0.23030749], [ 0.99798563, 0.00201437], [ 0.86297361, 0.13702639]] assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_gaussian,", "-0.2231435513142097 ) assert_almost_equal(hmm1.log_probability(list('T')), -1.6094379124341003 ) assert_almost_equal(hmm1.log_probability(list('HHHH')), -0.8925742052568388 ) assert_almost_equal(hmm1.log_probability(list('THHH')), -2.2788685663767296", "assert_array_almost_equal(model.weights, model2.weights) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_robust_from_json(): model2 = from_json(model.to_json()) assert_true(isinstance(model2,", "6.99086440e-01, 3.00913560e-01], [ 2.98988163e-01, 7.01011837e-01], [ 7.84134838e-01, 2.15865162e-01], [ 5.00000000e-01,", "y_hat) @with_setup(setup_multivariate_gaussian, teardown) def test_bc_multivariate_gaussian_nan_predict_proba(): y_hat = model.predict_proba(X_nan) y =", "[ 1.79862100e-02, 9.82013790e-01], [ 5.00000000e-01, 5.00000000e-01], [ 5.54485247e-05, 9.99944551e-01], [", "X = numpy.random.randn(100, 5) + 0.5 weights = numpy.abs(numpy.random.randn(100)) y", "def test_bc_multivariate_gaussian_to_json(): model2 = BayesClassifier.from_json(model.to_json()) assert_true(isinstance(model2, BayesClassifier)) assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(isinstance(model2.distributions[1],", "[ -5.58542088e-01, -8.48731256e-01], [ -7.67315597e-01, -6.24101927e-01], [ -2.32860808e+00, -1.02510436e-01], [", "1.2, 1.8, 1.5], [ 1.8, 0.3, 0.5], [ 0.7, 1.3,", "teardown) def test_bc_multivariate_mixed_initialization(): assert_equal(model.d, 3) assert_equal(model.n, 2) assert_equal(model.is_vl_, False) @with_setup(setup_multivariate_gaussian,", "def test_bc_multivariate_mixed_pickle(): model2 = pickle.loads(pickle.dumps(model)) assert_true(isinstance(model2, BayesClassifier)) assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(isinstance(model2.distributions[1],", "3.54343694e-01], [ 3.68423990e-03, 9.96315760e-01], [ 7.54345778e-08, 9.99999925e-01], [ 2.26032430e-06, 9.99997740e-01],", "= [0, 0, 0, 1, 1, 0, 0, 0, 0,", "model global hmm1 global hmm2 global hmm3 rigged = State(", "def test_bc_multivariate_mixed_predict_proba(): y_hat = model.predict_proba(X) y = [[ 0.60464873, 0.39535127],", "-3.05902274e-07, -1.50000003e+01], [ -8.68361522e-02, -2.48683615e+00], [ -1.00016521e-02, -4.61000165e+00]] assert_array_almost_equal(y, y_hat)", "1.3402777, 0.9547222]] mu2 = model.distributions[1].parameters[0] cov2 = model.distributions[1].parameters[1] mu2_t =", "teardown) def test_bc_multivariate_gaussian_predict_log_proba(): y_hat = model.predict_log_proba(X) y = [[ -1.48842547e-02,", "0.1], [ 0.8, 1.4, 0.5], [ 1.4, 2.6, 1.8], [", "assert_array_almost_equal(y, y_hat) @with_setup(setup_multivariate_mixed, teardown) def test_bc_multivariate_mixed_nan_predict_proba(): y_hat = model.predict_proba(X_nan) y", "y_hat2) assert_array_almost_equal(y_hat1, y_hat3) @with_setup(setup_multivariate_gaussian, teardown) def test_io_predict_proba(): X2 = DataGenerator(X)", "model2 = pickle.loads(pickle.dumps(model)) assert_true(isinstance(model2, BayesClassifier)) 
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution)) assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution)) assert_array_almost_equal(model.weights,", "def test_bc_multivariate_mixed_predict_parallel(): y_hat = model.predict(X, n_jobs=2) y = [0, 0,", "[[ -1.48842547e-02, -4.21488425e+00], [ -4.37487950e-01, -1.03748795e+00], [ -5.60369104e+00, -3.69104343e-03], [", "= [[1.308888888, 0.9272222222, 0.6227777777], [0.927222222, 2.251388888, 1.340277777], [0.622777777, 1.340277777, 0.9547222222]]", "logp2 = model.log_probability(X2) logp3 = model.log_probability(X3) assert_array_almost_equal(logp1, logp2) assert_array_almost_equal(logp1, logp3)" ]
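# ---------------------------------------------------------------------------
# A minimal sketch (not part of the original test suite) of where the expected
# arrays above come from: with two unit-covariance Gaussians at 0 and 2 and
# equal priors, the BayesClassifier posterior log-probabilities reduce to
# normalizing the per-class Gaussian exponents; the shared -d/2*log(2*pi)
# constant cancels. `manual_predict_log_proba` is a hypothetical helper name.
import numpy

def manual_predict_log_proba(X, means):
    # Per-class unnormalized log-likelihoods under identity covariance.
    logps = numpy.stack([-0.5 * ((X - m) ** 2).sum(axis=1) for m in means], axis=1)
    # Normalize with log-sum-exp to get posterior log-probabilities.
    norm = numpy.logaddexp.reduce(logps, axis=1, keepdims=True)
    return logps - norm

# The first row of X in the Gaussian setup reproduces y[0] of the
# predict_log_proba test:
# manual_predict_log_proba(numpy.array([[0.3, 0.5, 0.1]]),
#                          [numpy.zeros(3), numpy.full(3, 2.0)])
# -> [[-0.01488425, -4.21488425]]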
[ "dict): if config.get(\"VARIABLE_RANKING\"): output = VariableRanking(sol, base_kernel) else: output =", "ImportError: print(\"Gurobi not found: error ignored to allow tests\") def", "Solution, base_kernel: dict, config: dict): if config.get(\"VARIABLE_RANKING\"): output = VariableRanking(sol,", "var_name): return self.score[var_name] def success_update_score(self, curr_kernel, curr_bucket): raise NotImplementedError def", "curr_kernel, curr_bucket): for var in curr_bucket: if curr_kernel[var]: self.score[var] +=", "name, value): if value == 0: self.score[name] += 0.1 else:", "if base_kernel[k] else v for k, v in solution.vars.items()} def", "failure_update_score(self, curr_kernel, curr_bucket): raise NotImplementedError class ReducedCostScoring(AbstactVariableScoring): def success_update_score(self, curr_kernel,", "= {k: 0 if base_kernel[k] else v for k, v", "Solution, base_kernel: dict): self.score = {k: 0 if base_kernel[k] else", "if curr_kernel[var]: self.score[var] += 1 else: self.score[var] -= 1 def", "tests\") def variable_score_factory(sol: Solution, base_kernel: dict, config: dict): if config.get(\"VARIABLE_RANKING\"):", "config: dict): if config.get(\"VARIABLE_RANKING\"): output = VariableRanking(sol, base_kernel) else: output", "success_update_score(self, curr_kernel, curr_bucket): for var in curr_bucket: if curr_kernel[var]: self.score[var]", "VariableRanking(AbstactVariableScoring): def cb_update_score(self, name, value): if value == 0: self.score[name]", "success_update_score(self, curr_kernel, curr_bucket): pass def failure_update_score(self, curr_kernel, curr_bucket): pass class", "v in solution.vars.items()} def get_value(self, var_name): return self.score[var_name] def success_update_score(self,", "where): if where == gurobipy.GRB.Callback.MIPSOL: for var in model.getVars(): value", "def cb_update_score(self, name, value): if value == 0: self.score[name] +=", "allow tests\") def variable_score_factory(sol: Solution, base_kernel: dict, config: dict): if", "from .solution import Solution try: import gurobipy except ImportError: print(\"Gurobi", "#! 
/usr/bin/python from .solution import Solution try: import gurobipy except", "/usr/bin/python from .solution import Solution try: import gurobipy except ImportError:", "AbstactVariableScoring: def __init__(self, solution: Solution, base_kernel: dict): self.score = {k:", "import Solution try: import gurobipy except ImportError: print(\"Gurobi not found:", "return output def __build_callback__(scoring): def callback(model, where): if where ==", "VariableRanking): output = __build_callback__(scoring) else: output = None return output", "0.1 else: self.score[name] -= 0.1 def success_update_score(self, curr_kernel, curr_bucket): for", "self.score[var_name] def success_update_score(self, curr_kernel, curr_bucket): raise NotImplementedError def failure_update_score(self, curr_kernel,", "pass def failure_update_score(self, curr_kernel, curr_bucket): pass class VariableRanking(AbstactVariableScoring): def cb_update_score(self,", "+= 0.1 else: self.score[name] -= 0.1 def success_update_score(self, curr_kernel, curr_bucket):", "raise NotImplementedError class ReducedCostScoring(AbstactVariableScoring): def success_update_score(self, curr_kernel, curr_bucket): pass def", "output = None return output def __build_callback__(scoring): def callback(model, where):", "base_kernel: dict, config: dict): if config.get(\"VARIABLE_RANKING\"): output = VariableRanking(sol, base_kernel)", "in curr_bucket: if curr_kernel[var]: self.score[var] -= 15 else: self.score[var] +=", "curr_bucket): for var in curr_bucket: if curr_kernel[var]: self.score[var] -= 15", "solution.vars.items()} def get_value(self, var_name): return self.score[var_name] def success_update_score(self, curr_kernel, curr_bucket):", "except ImportError: print(\"Gurobi not found: error ignored to allow tests\")", "if config.get(\"VARIABLE_RANKING\"): output = VariableRanking(sol, base_kernel) else: output = ReducedCostScoring(sol,", "= ReducedCostScoring(sol, base_kernel) return output class AbstactVariableScoring: def __init__(self, solution:", "def get_value(self, var_name): return self.score[var_name] def success_update_score(self, curr_kernel, curr_bucket): raise", "def variable_score_factory(sol: Solution, base_kernel: dict, config: dict): if config.get(\"VARIABLE_RANKING\"): output", "Solution try: import gurobipy except ImportError: print(\"Gurobi not found: error", "15 else: self.score[var] += 15 def failure_update_score(self, curr_kernel, curr_bucket): for", "base_kernel) return output class AbstactVariableScoring: def __init__(self, solution: Solution, base_kernel:", "output = VariableRanking(sol, base_kernel) else: output = ReducedCostScoring(sol, base_kernel) return", "if where == gurobipy.GRB.Callback.MIPSOL: for var in model.getVars(): value =", "in solution.vars.items()} def get_value(self, var_name): return self.score[var_name] def success_update_score(self, curr_kernel,", "curr_kernel, curr_bucket): pass def failure_update_score(self, curr_kernel, curr_bucket): pass class VariableRanking(AbstactVariableScoring):", "to allow tests\") def variable_score_factory(sol: Solution, base_kernel: dict, config: dict):", "failure_update_score(self, curr_kernel, curr_bucket): for var in curr_bucket: if curr_kernel[var]: self.score[var]", "else: self.score[var] -= 1 def callback_factory(scoring: AbstactVariableScoring): if isinstance(scoring, VariableRanking):", "output = ReducedCostScoring(sol, base_kernel) return output class AbstactVariableScoring: def __init__(self,", "dict, config: dict): if config.get(\"VARIABLE_RANKING\"): output = 
VariableRanking(sol, base_kernel) else:", "pass class VariableRanking(AbstactVariableScoring): def cb_update_score(self, name, value): if value ==", "config.get(\"VARIABLE_RANKING\"): output = VariableRanking(sol, base_kernel) else: output = ReducedCostScoring(sol, base_kernel)", "not found: error ignored to allow tests\") def variable_score_factory(sol: Solution,", "gurobipy.GRB.Callback.MIPSOL: for var in model.getVars(): value = model.cbGetSolution(var) scoring.cb_update_score(var.varName, value)", "curr_bucket: if curr_kernel[var]: self.score[var] += 1 else: self.score[var] -= 1", "error ignored to allow tests\") def variable_score_factory(sol: Solution, base_kernel: dict,", "0: self.score[name] += 0.1 else: self.score[name] -= 0.1 def success_update_score(self,", "curr_kernel, curr_bucket): pass class VariableRanking(AbstactVariableScoring): def cb_update_score(self, name, value): if", "curr_bucket): raise NotImplementedError def failure_update_score(self, curr_kernel, curr_bucket): raise NotImplementedError class", "variable_score_factory(sol: Solution, base_kernel: dict, config: dict): if config.get(\"VARIABLE_RANKING\"): output =", "None return output def __build_callback__(scoring): def callback(model, where): if where", "dict): self.score = {k: 0 if base_kernel[k] else v for", "for var in curr_bucket: if curr_kernel[var]: self.score[var] += 1 else:", "base_kernel[k] else v for k, v in solution.vars.items()} def get_value(self,", "curr_bucket: if curr_kernel[var]: self.score[var] -= 15 else: self.score[var] += 15", "1 else: self.score[var] -= 1 def callback_factory(scoring: AbstactVariableScoring): if isinstance(scoring,", "NotImplementedError def failure_update_score(self, curr_kernel, curr_bucket): raise NotImplementedError class ReducedCostScoring(AbstactVariableScoring): def", "cb_update_score(self, name, value): if value == 0: self.score[name] += 0.1", "else v for k, v in solution.vars.items()} def get_value(self, var_name):", "+= 1 else: self.score[var] -= 1 def callback_factory(scoring: AbstactVariableScoring): if", "found: error ignored to allow tests\") def variable_score_factory(sol: Solution, base_kernel:", "ignored to allow tests\") def variable_score_factory(sol: Solution, base_kernel: dict, config:", "{k: 0 if base_kernel[k] else v for k, v in", "value == 0: self.score[name] += 0.1 else: self.score[name] -= 0.1", "if isinstance(scoring, VariableRanking): output = __build_callback__(scoring) else: output = None", "NotImplementedError class ReducedCostScoring(AbstactVariableScoring): def success_update_score(self, curr_kernel, curr_bucket): pass def failure_update_score(self,", "var in curr_bucket: if curr_kernel[var]: self.score[var] += 1 else: self.score[var]", "output def __build_callback__(scoring): def callback(model, where): if where == gurobipy.GRB.Callback.MIPSOL:", "else: output = None return output def __build_callback__(scoring): def callback(model,", "def __build_callback__(scoring): def callback(model, where): if where == gurobipy.GRB.Callback.MIPSOL: for", "output class AbstactVariableScoring: def __init__(self, solution: Solution, base_kernel: dict): self.score", "curr_kernel[var]: self.score[var] += 1 else: self.score[var] -= 1 def callback_factory(scoring:", "def success_update_score(self, curr_kernel, curr_bucket): pass def failure_update_score(self, curr_kernel, curr_bucket): pass", "def callback_factory(scoring: AbstactVariableScoring): if isinstance(scoring, VariableRanking): output = __build_callback__(scoring) else:", "class 
VariableRanking(AbstactVariableScoring): def cb_update_score(self, name, value): if value == 0:", "for var in model.getVars(): value = model.cbGetSolution(var) scoring.cb_update_score(var.varName, value) return", "1 def callback_factory(scoring: AbstactVariableScoring): if isinstance(scoring, VariableRanking): output = __build_callback__(scoring)", "curr_bucket): pass def failure_update_score(self, curr_kernel, curr_bucket): pass class VariableRanking(AbstactVariableScoring): def", "if value == 0: self.score[name] += 0.1 else: self.score[name] -=", "== gurobipy.GRB.Callback.MIPSOL: for var in model.getVars(): value = model.cbGetSolution(var) scoring.cb_update_score(var.varName,", "-= 1 def callback_factory(scoring: AbstactVariableScoring): if isinstance(scoring, VariableRanking): output =", "__build_callback__(scoring) else: output = None return output def __build_callback__(scoring): def", "def failure_update_score(self, curr_kernel, curr_bucket): raise NotImplementedError class ReducedCostScoring(AbstactVariableScoring): def success_update_score(self,", "0 if base_kernel[k] else v for k, v in solution.vars.items()}", "= __build_callback__(scoring) else: output = None return output def __build_callback__(scoring):", "base_kernel: dict): self.score = {k: 0 if base_kernel[k] else v", "output = __build_callback__(scoring) else: output = None return output def", "self.score[name] += 0.1 else: self.score[name] -= 0.1 def success_update_score(self, curr_kernel,", "def __init__(self, solution: Solution, base_kernel: dict): self.score = {k: 0", "self.score[name] -= 0.1 def success_update_score(self, curr_kernel, curr_bucket): for var in", "get_value(self, var_name): return self.score[var_name] def success_update_score(self, curr_kernel, curr_bucket): raise NotImplementedError", "curr_bucket): pass class VariableRanking(AbstactVariableScoring): def cb_update_score(self, name, value): if value", "== 0: self.score[name] += 0.1 else: self.score[name] -= 0.1 def", "def success_update_score(self, curr_kernel, curr_bucket): raise NotImplementedError def failure_update_score(self, curr_kernel, curr_bucket):", "else: self.score[name] -= 0.1 def success_update_score(self, curr_kernel, curr_bucket): for var", "success_update_score(self, curr_kernel, curr_bucket): raise NotImplementedError def failure_update_score(self, curr_kernel, curr_bucket): raise", "= VariableRanking(sol, base_kernel) else: output = ReducedCostScoring(sol, base_kernel) return output", "v for k, v in solution.vars.items()} def get_value(self, var_name): return", "gurobipy except ImportError: print(\"Gurobi not found: error ignored to allow", "def success_update_score(self, curr_kernel, curr_bucket): for var in curr_bucket: if curr_kernel[var]:", "curr_kernel, curr_bucket): raise NotImplementedError class ReducedCostScoring(AbstactVariableScoring): def success_update_score(self, curr_kernel, curr_bucket):", "return self.score[var_name] def success_update_score(self, curr_kernel, curr_bucket): raise NotImplementedError def failure_update_score(self,", "if curr_kernel[var]: self.score[var] -= 15 else: self.score[var] += 15 def", "+= 15 def failure_update_score(self, curr_kernel, curr_bucket): for var in curr_bucket:", "= None return output def __build_callback__(scoring): def callback(model, where): if", "callback_factory(scoring: AbstactVariableScoring): if isinstance(scoring, VariableRanking): output = __build_callback__(scoring) else: output", "solution: Solution, base_kernel: dict): self.score = {k: 0 if base_kernel[k]", "0.1 def 
success_update_score(self, curr_kernel, curr_bucket): for var in curr_bucket: if", "for var in curr_bucket: if curr_kernel[var]: self.score[var] -= 15 else:", "__init__(self, solution: Solution, base_kernel: dict): self.score = {k: 0 if", "-= 0.1 def success_update_score(self, curr_kernel, curr_bucket): for var in curr_bucket:", "in curr_bucket: if curr_kernel[var]: self.score[var] += 1 else: self.score[var] -=", "else: output = ReducedCostScoring(sol, base_kernel) return output class AbstactVariableScoring: def", "else: self.score[var] += 15 def failure_update_score(self, curr_kernel, curr_bucket): for var", "k, v in solution.vars.items()} def get_value(self, var_name): return self.score[var_name] def", "for k, v in solution.vars.items()} def get_value(self, var_name): return self.score[var_name]", "self.score[var] -= 15 else: self.score[var] += 15 def failure_update_score(self, curr_kernel,", "isinstance(scoring, VariableRanking): output = __build_callback__(scoring) else: output = None return", "AbstactVariableScoring): if isinstance(scoring, VariableRanking): output = __build_callback__(scoring) else: output =", "curr_kernel, curr_bucket): for var in curr_bucket: if curr_kernel[var]: self.score[var] -=", ".solution import Solution try: import gurobipy except ImportError: print(\"Gurobi not", "-= 15 else: self.score[var] += 15 def failure_update_score(self, curr_kernel, curr_bucket):", "15 def failure_update_score(self, curr_kernel, curr_bucket): for var in curr_bucket: if", "ReducedCostScoring(AbstactVariableScoring): def success_update_score(self, curr_kernel, curr_bucket): pass def failure_update_score(self, curr_kernel, curr_bucket):", "self.score[var] += 1 else: self.score[var] -= 1 def callback_factory(scoring: AbstactVariableScoring):", "try: import gurobipy except ImportError: print(\"Gurobi not found: error ignored", "curr_bucket): for var in curr_bucket: if curr_kernel[var]: self.score[var] += 1", "print(\"Gurobi not found: error ignored to allow tests\") def variable_score_factory(sol:", "class ReducedCostScoring(AbstactVariableScoring): def success_update_score(self, curr_kernel, curr_bucket): pass def failure_update_score(self, curr_kernel,", "import gurobipy except ImportError: print(\"Gurobi not found: error ignored to", "value): if value == 0: self.score[name] += 0.1 else: self.score[name]", "curr_kernel[var]: self.score[var] -= 15 else: self.score[var] += 15 def failure_update_score(self,", "self.score[var] += 15 def failure_update_score(self, curr_kernel, curr_bucket): for var in", "def failure_update_score(self, curr_kernel, curr_bucket): for var in curr_bucket: if curr_kernel[var]:", "callback(model, where): if where == gurobipy.GRB.Callback.MIPSOL: for var in model.getVars():", "__build_callback__(scoring): def callback(model, where): if where == gurobipy.GRB.Callback.MIPSOL: for var", "raise NotImplementedError def failure_update_score(self, curr_kernel, curr_bucket): raise NotImplementedError class ReducedCostScoring(AbstactVariableScoring):", "curr_bucket): raise NotImplementedError class ReducedCostScoring(AbstactVariableScoring): def success_update_score(self, curr_kernel, curr_bucket): pass", "base_kernel) else: output = ReducedCostScoring(sol, base_kernel) return output class AbstactVariableScoring:", "VariableRanking(sol, base_kernel) else: output = ReducedCostScoring(sol, base_kernel) return output class", "self.score = {k: 0 if base_kernel[k] else v for k,", "class AbstactVariableScoring: def __init__(self, solution: Solution, base_kernel: dict): 
self.score =", "self.score[var] -= 1 def callback_factory(scoring: AbstactVariableScoring): if isinstance(scoring, VariableRanking): output", "def callback(model, where): if where == gurobipy.GRB.Callback.MIPSOL: for var in", "where == gurobipy.GRB.Callback.MIPSOL: for var in model.getVars(): value = model.cbGetSolution(var)", "ReducedCostScoring(sol, base_kernel) return output class AbstactVariableScoring: def __init__(self, solution: Solution,", "curr_kernel, curr_bucket): raise NotImplementedError def failure_update_score(self, curr_kernel, curr_bucket): raise NotImplementedError", "failure_update_score(self, curr_kernel, curr_bucket): pass class VariableRanking(AbstactVariableScoring): def cb_update_score(self, name, value):", "var in model.getVars(): value = model.cbGetSolution(var) scoring.cb_update_score(var.varName, value) return callback", "var in curr_bucket: if curr_kernel[var]: self.score[var] -= 15 else: self.score[var]", "return output class AbstactVariableScoring: def __init__(self, solution: Solution, base_kernel: dict):", "def failure_update_score(self, curr_kernel, curr_bucket): pass class VariableRanking(AbstactVariableScoring): def cb_update_score(self, name," ]
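# ---------------------------------------------------------------------------
# A usage sketch (not part of the module above, and assuming Gurobi is
# installed): wiring the ranking callback into a solve. `FakeSolution` is a
# stand-in for the real Solution class; Model, addVars, and optimize(callback)
# are standard gurobipy API.
if __name__ == "__main__":
    class FakeSolution:
        vars = {"x[0]": 1.0, "x[1]": 0.0, "x[2]": 1.0}

    base_kernel = {"x[0]": True, "x[1]": False, "x[2]": False}
    scoring = variable_score_factory(FakeSolution(), base_kernel,
                                     config={"VARIABLE_RANKING": True})

    toy = gurobipy.Model("toy")
    x = toy.addVars(3, vtype=gurobipy.GRB.BINARY, name="x")
    toy.setObjective(x.sum(), gurobipy.GRB.MAXIMIZE)

    # VariableRanking gets a MIPSOL callback; ReducedCostScoring would get None.
    cb = callback_factory(scoring)
    if cb is not None:
        toy.optimize(cb)  # cb_update_score runs at each new incumbent
    else:
        toy.optimize()
    print(scoring.get_value("x[0]"))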
[ "of issues arising from PATH, sys.path, multiple Pythons, their interactions,", "users suffer every time an entrypoint gets moved. To alleviate", "'-m pip' instead of \" \"running pip directly.\\n\" ) return", "for the current one. \"\"\" sys.stderr.write( \"WARNING: pip is being", "avoid this problem you can invoke Python with '-m pip'", "Because of issues arising from PATH, sys.path, multiple Pythons, their", "provide a mechanism for warning users and directing them to", "of pip.\\n\" \"Please see https://github.com/pypa/pip/issues/5599 for advice on \" \"fixing", "typing import Optional, List def _wrapper(args=None): # type: (Optional[List[str]]) ->", "\"fixing the underlying issue.\\n\" \"To avoid this problem you can", "MYPY_CHECK_RUNNING if MYPY_CHECK_RUNNING: from typing import Optional, List def _wrapper(args=None):", "pip has had several entrypoints defined. Because of issues arising", "advice on \" \"fixing the underlying issue.\\n\" \"To avoid this", "current one. \"\"\" sys.stderr.write( \"WARNING: pip is being invoked by", "alleviate this pain, and provide a mechanism for warning users", "define all of our old entrypoints as wrappers for the", "if MYPY_CHECK_RUNNING: from typing import Optional, List def _wrapper(args=None): #", "time an entrypoint gets moved. To alleviate this pain, and", "entrypoints as wrappers for the current one. \"\"\" sys.stderr.write( \"WARNING:", "gets moved. To alleviate this pain, and provide a mechanism", "issue.\\n\" \"To avoid this problem you can invoke Python with", "https://github.com/pypa/pip/issues/5599 for advice on \" \"fixing the underlying issue.\\n\" \"To", "an appropriate place for help, we now define all of", "an entrypoint gets moved. To alleviate this pain, and provide", "this problem you can invoke Python with '-m pip' instead", "this pain, and provide a mechanism for warning users and", "import sys from fetchcode.vcs.pip._internal.cli.main import main from fetchcode.vcs.pip._internal.utils.typing import MYPY_CHECK_RUNNING", "multiple Pythons, their interactions, and most of them having a", "<gh_stars>1-10 import sys from fetchcode.vcs.pip._internal.cli.main import main from fetchcode.vcs.pip._internal.utils.typing import", "List def _wrapper(args=None): # type: (Optional[List[str]]) -> int \"\"\"Central wrapper", "entrypoint gets moved. To alleviate this pain, and provide a", "with '-m pip' instead of \" \"running pip directly.\\n\" )", "a pip installed, users suffer every time an entrypoint gets", "is being invoked by an old script wrapper. This will", "and directing them to an appropriate place for help, we", "several entrypoints defined. Because of issues arising from PATH, sys.path,", "Optional, List def _wrapper(args=None): # type: (Optional[List[str]]) -> int \"\"\"Central", "interactions, and most of them having a pip installed, users", "\" \"fixing the underlying issue.\\n\" \"To avoid this problem you", "MYPY_CHECK_RUNNING: from typing import Optional, List def _wrapper(args=None): # type:", "sys.path, multiple Pythons, their interactions, and most of them having", "all old entrypoints. Historically pip has had several entrypoints defined.", "def _wrapper(args=None): # type: (Optional[List[str]]) -> int \"\"\"Central wrapper for", "most of them having a pip installed, users suffer every", "all of our old entrypoints as wrappers for the current", "suffer every time an entrypoint gets moved. To alleviate this", "an old script wrapper. This will \" \"fail in a", "\"\"\"Central wrapper for all old entrypoints. 
Historically pip has had", "\"WARNING: pip is being invoked by an old script wrapper.", "the underlying issue.\\n\" \"To avoid this problem you can invoke", "every time an entrypoint gets moved. To alleviate this pain,", "entrypoints. Historically pip has had several entrypoints defined. Because of", "\"\"\" sys.stderr.write( \"WARNING: pip is being invoked by an old", "mechanism for warning users and directing them to an appropriate", "To alleviate this pain, and provide a mechanism for warning", "wrapper for all old entrypoints. Historically pip has had several", "invoke Python with '-m pip' instead of \" \"running pip", "version of pip.\\n\" \"Please see https://github.com/pypa/pip/issues/5599 for advice on \"", "# type: (Optional[List[str]]) -> int \"\"\"Central wrapper for all old", "to an appropriate place for help, we now define all", "old entrypoints. Historically pip has had several entrypoints defined. Because", "import MYPY_CHECK_RUNNING if MYPY_CHECK_RUNNING: from typing import Optional, List def", "now define all of our old entrypoints as wrappers for", "sys.stderr.write( \"WARNING: pip is being invoked by an old script", "from PATH, sys.path, multiple Pythons, their interactions, and most of", "their interactions, and most of them having a pip installed,", "int \"\"\"Central wrapper for all old entrypoints. Historically pip has", "of them having a pip installed, users suffer every time", "for warning users and directing them to an appropriate place", "script wrapper. This will \" \"fail in a future version", "This will \" \"fail in a future version of pip.\\n\"", "see https://github.com/pypa/pip/issues/5599 for advice on \" \"fixing the underlying issue.\\n\"", "underlying issue.\\n\" \"To avoid this problem you can invoke Python", "having a pip installed, users suffer every time an entrypoint", "directing them to an appropriate place for help, we now", "invoked by an old script wrapper. This will \" \"fail", "\" \"fail in a future version of pip.\\n\" \"Please see", "for help, we now define all of our old entrypoints", "defined. Because of issues arising from PATH, sys.path, multiple Pythons,", "help, we now define all of our old entrypoints as", "for advice on \" \"fixing the underlying issue.\\n\" \"To avoid", "pip is being invoked by an old script wrapper. This", "wrapper. This will \" \"fail in a future version of", "has had several entrypoints defined. Because of issues arising from", "them to an appropriate place for help, we now define", "Pythons, their interactions, and most of them having a pip", "main from fetchcode.vcs.pip._internal.utils.typing import MYPY_CHECK_RUNNING if MYPY_CHECK_RUNNING: from typing import", "type: (Optional[List[str]]) -> int \"\"\"Central wrapper for all old entrypoints.", "we now define all of our old entrypoints as wrappers", "our old entrypoints as wrappers for the current one. \"\"\"", "a future version of pip.\\n\" \"Please see https://github.com/pypa/pip/issues/5599 for advice", "(Optional[List[str]]) -> int \"\"\"Central wrapper for all old entrypoints. Historically", "the current one. \"\"\" sys.stderr.write( \"WARNING: pip is being invoked", "arising from PATH, sys.path, multiple Pythons, their interactions, and most", "for all old entrypoints. Historically pip has had several entrypoints", "on \" \"fixing the underlying issue.\\n\" \"To avoid this problem", "and most of them having a pip installed, users suffer", "had several entrypoints defined. 
Because of issues arising from PATH,", "will \" \"fail in a future version of pip.\\n\" \"Please", "import main from fetchcode.vcs.pip._internal.utils.typing import MYPY_CHECK_RUNNING if MYPY_CHECK_RUNNING: from typing", "\"fail in a future version of pip.\\n\" \"Please see https://github.com/pypa/pip/issues/5599", "Python with '-m pip' instead of \" \"running pip directly.\\n\"", "one. \"\"\" sys.stderr.write( \"WARNING: pip is being invoked by an", "PATH, sys.path, multiple Pythons, their interactions, and most of them", "users and directing them to an appropriate place for help,", "installed, users suffer every time an entrypoint gets moved. To", "fetchcode.vcs.pip._internal.utils.typing import MYPY_CHECK_RUNNING if MYPY_CHECK_RUNNING: from typing import Optional, List", "issues arising from PATH, sys.path, multiple Pythons, their interactions, and", "old entrypoints as wrappers for the current one. \"\"\" sys.stderr.write(", "from fetchcode.vcs.pip._internal.cli.main import main from fetchcode.vcs.pip._internal.utils.typing import MYPY_CHECK_RUNNING if MYPY_CHECK_RUNNING:", "by an old script wrapper. This will \" \"fail in", "sys from fetchcode.vcs.pip._internal.cli.main import main from fetchcode.vcs.pip._internal.utils.typing import MYPY_CHECK_RUNNING if", "-> int \"\"\"Central wrapper for all old entrypoints. Historically pip", "\"To avoid this problem you can invoke Python with '-m", "and provide a mechanism for warning users and directing them", "moved. To alleviate this pain, and provide a mechanism for", "of our old entrypoints as wrappers for the current one.", "import Optional, List def _wrapper(args=None): # type: (Optional[List[str]]) -> int", "problem you can invoke Python with '-m pip' instead of", "being invoked by an old script wrapper. This will \"", "_wrapper(args=None): # type: (Optional[List[str]]) -> int \"\"\"Central wrapper for all", "as wrappers for the current one. \"\"\" sys.stderr.write( \"WARNING: pip", "pip.\\n\" \"Please see https://github.com/pypa/pip/issues/5599 for advice on \" \"fixing the", "them having a pip installed, users suffer every time an", "can invoke Python with '-m pip' instead of \" \"running", "fetchcode.vcs.pip._internal.cli.main import main from fetchcode.vcs.pip._internal.utils.typing import MYPY_CHECK_RUNNING if MYPY_CHECK_RUNNING: from", "\"Please see https://github.com/pypa/pip/issues/5599 for advice on \" \"fixing the underlying", "from typing import Optional, List def _wrapper(args=None): # type: (Optional[List[str]])", "wrappers for the current one. \"\"\" sys.stderr.write( \"WARNING: pip is", "Historically pip has had several entrypoints defined. Because of issues", "in a future version of pip.\\n\" \"Please see https://github.com/pypa/pip/issues/5599 for", "future version of pip.\\n\" \"Please see https://github.com/pypa/pip/issues/5599 for advice on", "pain, and provide a mechanism for warning users and directing", "warning users and directing them to an appropriate place for", "you can invoke Python with '-m pip' instead of \"", "appropriate place for help, we now define all of our", "entrypoints defined. Because of issues arising from PATH, sys.path, multiple", "old script wrapper. 
This will \" \"fail in a future", "place for help, we now define all of our old", "pip installed, users suffer every time an entrypoint gets moved.", "from fetchcode.vcs.pip._internal.utils.typing import MYPY_CHECK_RUNNING if MYPY_CHECK_RUNNING: from typing import Optional,", "a mechanism for warning users and directing them to an", "pip' instead of \" \"running pip directly.\\n\" ) return main(args)" ]
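# ---------------------------------------------------------------------------
# A usage sketch (not from pip itself): any legacy console script can simply
# delegate to _wrapper, which writes the deprecation warning to stderr and
# then behaves exactly like `python -m pip` with the same arguments.
if __name__ == "__main__":
    # Equivalent to `pip --version`, plus the warning above on stderr.
    raise SystemExit(_wrapper(["--version"]))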
[ "= comment.replace('@doc-functions','') else: # Unwanted comment; skip. continue # Record", "with custom header. # bb_lines = header_lines + Get_BB_Text(doc_lines) #", "xml file. ''' # List of tuples of (label, text)", "# Lua functions are in one lump, like overview. elif", "'.lua': doc_lines += Get_Lua_Text(file_path) with open(doc_path, 'w') as file: file.write('\\n'.join(doc_lines))", "issues with the text helper functions below.) for rel_path, file_list", "dict of {section label: text} to a list of text", "# Starts blank, filled by decorator. title = '' #", "# Grab the readme contents. # doc_lines = (doc_dir /", "# release_dir.mkdir() # # # Grab the readme contents. #", "included in the class methods to avoid # import issues", "comment_blocks.append('\\n'.join(these_lines)) # Check single-line comments after block comments, to avoid", "full path. doc_path = spec.root_path / rel_path # Get lines", "loop. i += 1 # Title to put on label", "ret_text_lines.append('* **{}**'.format(key)) # Process the text a bit. text =", "for rel_path, file_list in spec.doc_specs.items(): # Set up the full", "from a decorated lua file. ''' text = lua_path.read_text() ret_text_lines", "if not functions_started: functions_started = True ret_text_lines += ['', '###", "= header_lines + Get_BB_Text(doc_lines) # (release_dir / (doc_dir.name + '_bb_readme.txt')).write_text('\\n'.join(bb_lines))", "of the doc. while i < len(lua_lines): next_line = lua_lines[i]", "this. comment_blocks = [] lua_lines = text.splitlines() i = 0", "= ' ' # Stick a label line when starting", "= node.getnext().get('name') text = node.text.replace('@doc-cue','') else: # Unwanted comment; skip.", "'overview', then a series of names of cues or functions.", "if key == 'overview': ret_text_lines += ['', '### {} Overview'.format(title),", "one lump, like overview. elif key == 'functions': ret_text_lines +=", "= node.text.replace('@doc-cue','') else: # Unwanted comment; skip. continue # Record", "this_line.startswith('--[['): # Scan until the closing ]]. these_lines = []", "= 'section' text = node.text.replace('@doc-section','') elif '@doc-cue' in node.text: label", "the project specifications. from Release_Specs import release_specs def Make(): for", "Make each of the doc files (if any). # (Note:", "import etree import sys from itertools import chain project_dir =", "spec.doc_specs.items(): # Set up the full path. doc_path = spec.root_path", "''' text = lua_path.read_text() ret_text_lines = [] # Extract non-indented", "generally avoiding putting main docs on the forum. #def Make_BB_Code(doc_dir,", "# Generate a bbcode version, prefixing with custom header. #", "the cues. tree = etree.parse(str(xml_path)) root = tree.xpath('/*')[0] cues =", "how lxml checks this (isinstance doesn't work). if node.tag is", "# Stick a label line when starting the function section.", "= '' # List of tuples of (label, text) hold", "for node in chain(root.iterchildren(), cues.iterchildren()): # Skip non-comments. # Kinda", "#def Make_BB_Code(doc_dir, header_lines = []): # ''' # Turn the", "is placed in the release folder. # ''' # release_dir", "== 'overview': ret_text_lines += ['', '### {} Overview'.format(title), ''] indent", "Unwanted comment; skip. continue # Record it. doc_text_sections.append((label, text)) #", "x4_customizer_dir not in sys.path: sys.path.append(x4_customizer_dir) from Framework.Make_Documentation import Merge_Lines #from", "functions. ''' # Transfer to annotated/indented lines. functions_started = False", "Looking for decorated comments. 
for node in chain(root.iterchildren(), cues.iterchildren()): #", "formatting applied. Expects the input to start with a 'title',", "= 'overview' text = comment.replace('@doc-overview','') # For now, all functions", "per loop. i += 1 # Title to put on", "file_path.suffix == '.lua': doc_lines += Get_Lua_Text(file_path) with open(doc_path, 'w') as", "elif key == 'functions': ret_text_lines += ['', '### {} Functions'.format(title),", "= [] # Extract non-indented comments. # TODO: maybe regex", "= '' # Otherwise these are md cues. else: indent", "if not release_dir.exists(): # release_dir.mkdir() # # # Grab the", "= '' ret_text_lines = [] for key, text in doc_text_sections:", "cues. tree = etree.parse(str(xml_path)) root = tree.xpath('/*')[0] cues = tree.xpath('/*/cues')[0]", "with labelling and formatting applied. Expects the input to start", "or functions. ''' # Transfer to annotated/indented lines. functions_started =", "to a list of text lines, with labelling and formatting", "ret_text_lines += ['', '### {} Cues'.format(title), ''] # Bullet the", "etree.parse(str(xml_path)) root = tree.xpath('/*')[0] cues = tree.xpath('/*/cues')[0] # Stride through", "to annotated/indented lines. functions_started = False title = '' ret_text_lines", "False title = '' ret_text_lines = [] for key, text", "and break into convenient lines. text_lines = [indent + line", "hold the extracted text lines. doc_text_sections = [] # Read", "filled by decorator. title = '' # List of tuples", "else: # Unwanted comment; skip. continue # Record it. doc_text_sections.append((label,", "text} to a list of text lines, with labelling and", "Turn the ext_dir's readme into a bbcode txt file. #", "label = 'functions' text = comment.replace('@doc-functions','') else: # Unwanted comment;", "1 # Only search to the end of the doc.", "<filename>Support/Make_Documentation.py ''' Support for generating documentation readmes for the extensions.", "node.getnext().get('name') text = node.text.replace('@doc-cue','') else: # Unwanted comment; skip. continue", "elif '@doc-functions' in comment: label = 'functions' text = comment.replace('@doc-functions','')", "'@doc-section' in node.text: label = 'section' text = node.text.replace('@doc-section','') elif", "text = node.text.replace('@doc-overview','') elif '@doc-section' in node.text: label = 'section'", "placed in the release folder. # ''' # release_dir =", "i += 1 # Title to put on label lines.", "# (Note: this function not included in the class methods", "to functions, and break into convenient lines. text_lines = [indent", "file. # Output is placed in the release folder. #", "text in doc_text_sections: # Extract the title and continue; this", "+= ['', '### {} Overview'.format(title), ''] indent = '' #", "the forum. #def Make_BB_Code(doc_dir, header_lines = []): # ''' #", "'@doc-title' in comment: label = 'title' text = comment.replace('@doc-title','') #", "header_lines = []): # ''' # Turn the ext_dir's readme", "Framework.Make_Documentation import Get_BB_Text # Grab the project specifications. from Release_Specs", "project specifications. from Release_Specs import release_specs def Make(): for spec", "holding the documentation extracted from a decorated MD xml file.", "label line when starting the function section. if not functions_started:", "(isinstance doesn't work). if node.tag is not etree.Comment: continue #", "indent = '' # Lua functions are in one lump,", "are in one lump, like overview. elif key == 'functions':", "Read the xml and pick out the cues. 
tree =", "# Text blocks are either overview or cue. elif '@doc-overview'", "extracted from a decorated MD xml file. ''' # List", "text.strip() continue # Header gets an 'overview' label. if key", "a dict of {section label: text} to a list of", "+= 1 comment_blocks.append('\\n'.join(these_lines)) # Check single-line comments after block comments,", "# Set up an import from the customizer for some", "functions are in one lump, like overview. elif key ==", "comment_blocks.append(this_line.replace('--','')) # Always one increment per loop. i += 1", "'@doc-title' in node.text: label = 'title' text = node.text.replace('@doc-title','') elif", "one increment per loop. i += 1 # Title to", "# Transfer to annotated/indented lines. functions_started = False title =", "# if not release_dir.exists(): # release_dir.mkdir() # # # Grab", "# Set up the full path. doc_path = spec.root_path /", "forum. #def Make_BB_Code(doc_dir, header_lines = []): # ''' # Turn", "# Handle title declarations. if '@doc-title' in node.text: label =", "['',''] indent = '' # Otherwise these are md cues.", "Stride through comments/cues in the list. # Looking for decorated", "text = comment.replace('@doc-overview','') # For now, all functions are lumped", "when starting the function section. if not functions_started: functions_started =", "readme into a bbcode txt file. # Output is placed", "if key == 'title': title = text.strip() continue # Header", "the extracted text lines. doc_text_sections = [] # Read the", "lines and return. return Sections_To_Lines(doc_text_sections) def Get_Lua_Text(lua_path): ''' Extract documentation", "if '@doc-title' in comment: label = 'title' text = comment.replace('@doc-title','')", "'' # List of tuples of (label, text) hold the", "== 'title': title = text.strip() continue # Header gets an", "'@doc-overview' in comment: label = 'overview' text = comment.replace('@doc-overview','') #", "node.text.replace('@doc-overview','') elif '@doc-section' in node.text: label = 'section' text =", "# Skip non-comments. # Kinda awkward how lxml checks this", "elif key == 'section': ret_text_lines += ['',''] indent = ''", "# Get lines for all files. doc_lines = [] for", "label = 'overview' text = comment.replace('@doc-overview','') # For now, all", "line in text.splitlines()] # Record for output. ret_text_lines += text_lines", "name. ret_text_lines.append('* **{}**'.format(key)) # Process the text a bit. text", "elif '@doc-overview' in node.text: label = 'overview' text = node.text.replace('@doc-overview','')", "comment; skip. continue # Record it. doc_text_sections.append((label, text)) # Process", "multiple. elif key == 'section': ret_text_lines += ['',''] indent =", "# Record the first line. these_lines.append(this_line.replace('--[[','')) i += 1 #", "text_lines = [indent + line for line in text.splitlines()] #", "annotated/indented lines. functions_started = False title = '' ret_text_lines =", "path. doc_path = spec.root_path / rel_path # Get lines for", "# Grab the project specifications. from Release_Specs import release_specs def", "Record it. doc_text_sections.append((label, text)) # Process into lines and return.", "ext_dir's readme into a bbcode txt file. # Output is", "root = tree.xpath('/*')[0] cues = tree.xpath('/*/cues')[0] # Stride through comments/cues", "['', '### {} Functions'.format(title), ''] indent = '' # Sections", "comments/cues in the list. # Looking for decorated comments. for", "for decorated comments. for node in chain(root.iterchildren(), cues.iterchildren()): # Skip", "lines. 
doc_text_sections = [] # Go through the comments looking", "out the cues. tree = etree.parse(str(xml_path)) root = tree.xpath('/*')[0] cues", "bit. text = Merge_Lines(text) # Add indents to functions, and", "all of the content.xml files. spec.Update_Content_Version() # Make each of", "documentation extracted from a decorated MD xml file. ''' #", "sys.path.append(x4_customizer_dir) from Framework.Make_Documentation import Merge_Lines #from Framework.Make_Documentation import Get_BB_Text #", "in the class methods to avoid # import issues with", "node.text.replace('@doc-section','') elif '@doc-cue' in node.text: label = node.getnext().get('name') text =", "starting the function section. if not functions_started: functions_started = True", "readmes for the extensions. Extracts from decorated lua block comments", "'@doc-overview' in node.text: label = 'overview' text = node.text.replace('@doc-overview','') elif", "below.) for rel_path, file_list in spec.doc_specs.items(): # Set up the", "Make(): for spec in release_specs: # Update all of the", "files. spec.Update_Content_Version() # Make each of the doc files (if", "not release_dir.exists(): # release_dir.mkdir() # # # Grab the readme", "label = 'title' text = node.text.replace('@doc-title','') elif '@doc-overview' in node.text:", "a decorated MD xml file. ''' # List of tuples", "decorated comments. for node in chain(root.iterchildren(), cues.iterchildren()): # Skip non-comments.", "if this_line.startswith('--[['): # Scan until the closing ]]. these_lines =", "comments. ''' from pathlib import Path from lxml import etree", "files (if any). # (Note: this function not included in", "Lua functions are in one lump, like overview. elif key", "comment_blocks = [] lua_lines = text.splitlines() i = 0 while", "by decorator. title = '' # List of tuples of", "are lumped together in one comment. elif '@doc-functions' in comment:", "contents. # doc_lines = (doc_dir / 'Readme.md').read_text().splitlines() # # Generate", "xml comments. ''' from pathlib import Path from lxml import", "not included in the class methods to avoid # import", "the function name. ret_text_lines.append('* **{}**'.format(key)) # Process the text a", "last line; skip it. break these_lines.append(next_line) i += 1 comment_blocks.append('\\n'.join(these_lines))", "elif '@doc-overview' in comment: label = 'overview' text = comment.replace('@doc-overview','')", "Get_XML_Cue_Text(file_path) elif file_path.suffix == '.lua': doc_lines += Get_Lua_Text(file_path) with open(doc_path,", "with a 'title', then 'overview', then a series of names", "in one lump, like overview. elif key == 'functions': ret_text_lines", "comment: label = 'title' text = comment.replace('@doc-title','') # Text blocks", "For now, all functions are lumped together in one comment.", "# Record it. doc_text_sections.append((label, text)) # Process into lines and", "decorated lua file. ''' text = lua_path.read_text() ret_text_lines = []", "label lines. # Starts blank, filled by decorator. title =", "after block comments, to avoid # -- confusion. elif this_line.startswith('--'):", "a bbcode txt file. # Output is placed in the", "in text.splitlines()] # Record for output. ret_text_lines += text_lines return", "from pathlib import Path from lxml import etree import sys", "tree.xpath('/*')[0] cues = tree.xpath('/*/cues')[0] # Stride through comments/cues in the", "closing ]]. these_lines = [] # Record the first line.", "# Bullet the function name. 
ret_text_lines.append('* **{}**'.format(key)) # Process the", "''' Extract documentation text from a decorated lua file. '''", "a bbcode version, prefixing with custom header. # bb_lines =", "tuples of (label, text) hold the extracted text lines. doc_text_sections", "{} Functions'.format(title), ''] indent = '' # Sections may be", "in node.text: label = 'title' text = node.text.replace('@doc-title','') elif '@doc-overview'", "lines, with labelling and formatting applied. Expects the input to", "series of names of cues or functions. ''' # Transfer", "indent = '' # Sections may be multiple. elif key", "def Make(): for spec in release_specs: # Update all of", "file.write('\\n'.join(doc_lines)) return def Sections_To_Lines(doc_text_sections): ''' Converts a dict of {section", "the xml and pick out the cues. tree = etree.parse(str(xml_path))", "into lines and return. return Sections_To_Lines(doc_text_sections) def Get_Lua_Text(lua_path): ''' Extract", "the comments looking for decorators. for comment in comment_blocks: #", "# Update all of the content.xml files. spec.Update_Content_Version() # Make", "comment.replace('@doc-functions','') else: # Unwanted comment; skip. continue # Record it.", "Scan until the closing ]]. these_lines = [] # Record", "return ret_text_lines def Get_XML_Cue_Text(xml_path): ''' Returns a list of lines", "from Framework.Make_Documentation import Merge_Lines #from Framework.Make_Documentation import Get_BB_Text # Grab", "generating documentation readmes for the extensions. Extracts from decorated lua", "= tree.xpath('/*')[0] cues = tree.xpath('/*/cues')[0] # Stride through comments/cues in", "into a bbcode txt file. # Output is placed in", "on label lines. # Starts blank, filled by decorator. title", "blocks are either overview or cue. elif '@doc-overview' in comment:", "file. ''' # List of tuples of (label, text) hold", "Overview'.format(title), ''] indent = '' # Lua functions are in", "node.text.replace('@doc-title','') elif '@doc-overview' in node.text: label = 'overview' text =", "Extracts from decorated lua block comments and xml comments. '''", "= [] lua_lines = text.splitlines() i = 0 while i", "pathlib import Path from lxml import etree import sys from", "the release folder. # ''' # release_dir = project_dir /", "avoid # import issues with the text helper functions below.)", "Get_XML_Cue_Text(xml_path): ''' Returns a list of lines holding the documentation", "doc_lines = (doc_dir / 'Readme.md').read_text().splitlines() # # Generate a bbcode", "(doc_dir.name + '_bb_readme.txt')).write_text('\\n'.join(bb_lines)) # return if __name__ == '__main__': Make()", "comment: label = 'overview' text = comment.replace('@doc-overview','') # For now,", "an import from the customizer for some text processing. x4_customizer_dir", "**{}**'.format(key)) # Process the text a bit. text = Merge_Lines(text)", "key == 'title': title = text.strip() continue # Header gets", "Path(__file__).resolve().parents[1] # Set up an import from the customizer for", "Always one increment per loop. i += 1 # Title", "readme contents. # doc_lines = (doc_dir / 'Readme.md').read_text().splitlines() # #", "ret_text_lines += ['', '### {} Functions'.format(title), ''] indent = ''", "doc_text_sections.append((label, text)) # Process into lines and return. return Sections_To_Lines(doc_text_sections)", "comments, to avoid # -- confusion. 
elif this_line.startswith('--'): comment_blocks.append(this_line.replace('--','')) #", "if file_path.suffix == '.xml': doc_lines += Get_XML_Cue_Text(file_path) elif file_path.suffix ==", "+= text_lines return ret_text_lines def Get_XML_Cue_Text(xml_path): ''' Returns a list", "skip. continue # Record it. doc_text_sections.append((label, text)) # Process into", "def Sections_To_Lines(doc_text_sections): ''' Converts a dict of {section label: text}", "line when starting the function section. if not functions_started: functions_started", "# Found the last line; skip it. break these_lines.append(next_line) i", "return Sections_To_Lines(doc_text_sections) #-Removed; generally avoiding putting main docs on the", "xml and pick out the cues. tree = etree.parse(str(xml_path)) root", "tree = etree.parse(str(xml_path)) root = tree.xpath('/*')[0] cues = tree.xpath('/*/cues')[0] #", "Bullet the function name. ret_text_lines.append('* **{}**'.format(key)) # Process the text", "[] # Read the xml and pick out the cues.", "Merge_Lines(text) # Add indents to functions, and break into convenient", "text.splitlines()] # Record for output. ret_text_lines += text_lines return ret_text_lines", "0 while i < len(lua_lines): this_line = lua_lines[i] if this_line.startswith('--[['):", "break into convenient lines. text_lines = [indent + line for", "header_lines + Get_BB_Text(doc_lines) # (release_dir / (doc_dir.name + '_bb_readme.txt')).write_text('\\n'.join(bb_lines)) #", "/ (doc_dir.name + '_bb_readme.txt')).write_text('\\n'.join(bb_lines)) # return if __name__ == '__main__':", "+= 1 # Title to put on label lines. #", "# Extract the title and continue; this isn't printed directly.", "file_list in spec.doc_specs.items(): # Set up the full path. doc_path", "# List of tuples of (label, text) hold the extracted", "continue # Handle title declarations. if '@doc-title' in node.text: label", "comments after block comments, to avoid # -- confusion. elif", "lines. functions_started = False title = '' ret_text_lines = []", "maybe regex this. comment_blocks = [] lua_lines = text.splitlines() i", "from lxml import etree import sys from itertools import chain", "md cues. else: indent = ' ' # Stick a", "i += 1 comment_blocks.append('\\n'.join(these_lines)) # Check single-line comments after block", "Get_BB_Text(doc_lines) # (release_dir / (doc_dir.name + '_bb_readme.txt')).write_text('\\n'.join(bb_lines)) # return if", "ret_text_lines = [] for key, text in doc_text_sections: # Extract", "= 'overview' text = node.text.replace('@doc-overview','') elif '@doc-section' in node.text: label", "text.splitlines() i = 0 while i < len(lua_lines): this_line =", "i = 0 while i < len(lua_lines): this_line = lua_lines[i]", "[] for key, text in doc_text_sections: # Extract the title", "the customizer for some text processing. x4_customizer_dir = str(project_dir.parent /", "title and continue; this isn't printed directly. if key ==", "= '' # Sections may be multiple. elif key ==", "= tree.xpath('/*/cues')[0] # Stride through comments/cues in the list. #", "text helper functions below.) for rel_path, file_list in spec.doc_specs.items(): #", "in spec.doc_specs.items(): # Set up the full path. doc_path =", "key == 'functions': ret_text_lines += ['', '### {} Functions'.format(title), '']", "== 'functions': ret_text_lines += ['', '### {} Functions'.format(title), ''] indent", "# Looking for decorated comments. 
for node in chain(root.iterchildren(), cues.iterchildren()):", "sys from itertools import chain project_dir = Path(__file__).resolve().parents[1] # Set", "non-indented comments. # TODO: maybe regex this. comment_blocks = []", "for spec in release_specs: # Update all of the content.xml", "the first line. these_lines.append(this_line.replace('--[[','')) i += 1 # Only search", "this_line = lua_lines[i] if this_line.startswith('--[['): # Scan until the closing", "putting main docs on the forum. #def Make_BB_Code(doc_dir, header_lines =", "in release_specs: # Update all of the content.xml files. spec.Update_Content_Version()", "'### {} Functions'.format(title), ''] indent = '' # Sections may", "'@doc-cue' in node.text: label = node.getnext().get('name') text = node.text.replace('@doc-cue','') else:", "text = comment.replace('@doc-title','') # Text blocks are either overview or", "pick out the cues. tree = etree.parse(str(xml_path)) root = tree.xpath('/*')[0]", "spec in release_specs: # Update all of the content.xml files.", "break these_lines.append(next_line) i += 1 comment_blocks.append('\\n'.join(these_lines)) # Check single-line comments", "to avoid # import issues with the text helper functions", "decorator. title = '' # List of tuples of (label,", "overview or cue. elif '@doc-overview' in comment: label = 'overview'", "Get lines for all files. doc_lines = [] for file_path", "and formatting applied. Expects the input to start with a", "lua_lines[i] if this_line.startswith('--[['): # Scan until the closing ]]. these_lines", "and pick out the cues. tree = etree.parse(str(xml_path)) root =", "to avoid # -- confusion. elif this_line.startswith('--'): comment_blocks.append(this_line.replace('--','')) # Always", "to put on label lines. # Starts blank, filled by", "or cue. elif '@doc-overview' in comment: label = 'overview' text", "= (doc_dir / 'Readme.md').read_text().splitlines() # # Generate a bbcode version,", "(doc_dir / 'Readme.md').read_text().splitlines() # # Generate a bbcode version, prefixing", "def Get_XML_Cue_Text(xml_path): ''' Returns a list of lines holding the", "'title' text = comment.replace('@doc-title','') # Text blocks are either overview", "then 'overview', then a series of names of cues or", "= [] for file_path in file_list: if file_path.suffix == '.xml':", "project_dir = Path(__file__).resolve().parents[1] # Set up an import from the", "ret_text_lines += ['',''] indent = '' # Otherwise these are", "Sections may be multiple. elif key == 'section': ret_text_lines +=", "Functions'.format(title), ''] indent = '' # Sections may be multiple.", "Header gets an 'overview' label. if key == 'overview': ret_text_lines", "# # Grab the readme contents. # doc_lines = (doc_dir", "key == 'overview': ret_text_lines += ['', '### {} Overview'.format(title), '']", "#-Removed; generally avoiding putting main docs on the forum. #def", "[] # Record the first line. these_lines.append(this_line.replace('--[[','')) i += 1", "+= ['', '### {} Cues'.format(title), ''] # Bullet the function", "# TODO: maybe regex this. comment_blocks = [] lua_lines =", "lua_lines = text.splitlines() i = 0 while i < len(lua_lines):", "the full path. doc_path = spec.root_path / rel_path # Get", "lxml import etree import sys from itertools import chain project_dir", "a list of text lines, with labelling and formatting applied.", "Sections_To_Lines(doc_text_sections) def Get_Lua_Text(lua_path): ''' Extract documentation text from a decorated", "= [] # Record the first line. 
these_lines.append(this_line.replace('--[[','')) i +=", "first line. these_lines.append(this_line.replace('--[[','')) i += 1 # Only search to", "end of the doc. while i < len(lua_lines): next_line =", "return. return Sections_To_Lines(doc_text_sections) def Get_Lua_Text(lua_path): ''' Extract documentation text from", "for line in text.splitlines()] # Record for output. ret_text_lines +=", "return def Sections_To_Lines(doc_text_sections): ''' Converts a dict of {section label:", "label = 'overview' text = node.text.replace('@doc-overview','') elif '@doc-section' in node.text:", "documentation text from a decorated lua file. ''' text =", "# import issues with the text helper functions below.) for", "Text blocks are either overview or cue. elif '@doc-overview' in", "[] for file_path in file_list: if file_path.suffix == '.xml': doc_lines", "ret_text_lines += ['', '### {} Overview'.format(title), ''] indent = ''", "Record the first line. these_lines.append(this_line.replace('--[[','')) i += 1 # Only", "header. # bb_lines = header_lines + Get_BB_Text(doc_lines) # (release_dir /", "Sections_To_Lines(doc_text_sections) #-Removed; generally avoiding putting main docs on the forum.", "node.text.replace('@doc-cue','') else: # Unwanted comment; skip. continue # Record it.", "= lua_lines[i] if this_line.startswith('--[['): # Scan until the closing ]].", "release_dir = project_dir / 'Release' # if not release_dir.exists(): #", "are either overview or cue. elif '@doc-overview' in comment: label", "regex this. comment_blocks = [] lua_lines = text.splitlines() i =", "title = '' ret_text_lines = [] for key, text in", "functions_started: functions_started = True ret_text_lines += ['', '### {} Cues'.format(title),", "/ 'Release' # if not release_dir.exists(): # release_dir.mkdir() # #", "lxml checks this (isinstance doesn't work). if node.tag is not", "= [] # Read the xml and pick out the", "the class methods to avoid # import issues with the", "release_dir.exists(): # release_dir.mkdir() # # # Grab the readme contents.", "doc_lines += Get_XML_Cue_Text(file_path) elif file_path.suffix == '.lua': doc_lines += Get_Lua_Text(file_path)", "like overview. elif key == 'functions': ret_text_lines += ['', '###", "an 'overview' label. if key == 'overview': ret_text_lines += ['',", "extracted text lines. doc_text_sections = [] # Read the xml", "comment: label = 'functions' text = comment.replace('@doc-functions','') else: # Unwanted", "'' # Sections may be multiple. elif key == 'section':", "chain project_dir = Path(__file__).resolve().parents[1] # Set up an import from", "Release_Specs import release_specs def Make(): for spec in release_specs: #", "''' Returns a list of lines holding the documentation extracted", "of tuples of (label, text) hold the extracted text lines.", "blank, filled by decorator. title = '' # List of", "until the closing ]]. these_lines = [] # Record the", "this function not included in the class methods to avoid", "names of cues or functions. ''' # Transfer to annotated/indented", "'title' text = node.text.replace('@doc-title','') elif '@doc-overview' in node.text: label =", "function not included in the class methods to avoid #", "avoid # -- confusion. elif this_line.startswith('--'): comment_blocks.append(this_line.replace('--','')) # Always one", "together in one comment. elif '@doc-functions' in comment: label =", "Go through the comments looking for decorators. for comment in", "folder. # ''' # release_dir = project_dir / 'Release' #", "ret_text_lines = [] # Extract non-indented comments. 
# TODO: maybe", "lines for all files. doc_lines = [] for file_path in", "Framework.Make_Documentation import Merge_Lines #from Framework.Make_Documentation import Get_BB_Text # Grab the", "'w') as file: file.write('\\n'.join(doc_lines)) return def Sections_To_Lines(doc_text_sections): ''' Converts a", "block comments and xml comments. ''' from pathlib import Path", "ret_text_lines def Get_XML_Cue_Text(xml_path): ''' Returns a list of lines holding", "lines and return. return Sections_To_Lines(doc_text_sections) #-Removed; generally avoiding putting main", "are md cues. else: indent = ' ' # Stick", "text = comment.replace('@doc-functions','') else: # Unwanted comment; skip. continue #", "Expects the input to start with a 'title', then 'overview',", "in comment: label = 'overview' text = comment.replace('@doc-overview','') # For", "# Turn the ext_dir's readme into a bbcode txt file.", "release_specs: # Update all of the content.xml files. spec.Update_Content_Version() #", "title = text.strip() continue # Header gets an 'overview' label.", "lines. text_lines = [indent + line for line in text.splitlines()]", "lumped together in one comment. elif '@doc-functions' in comment: label", "in node.text: label = node.getnext().get('name') text = node.text.replace('@doc-cue','') else: #", "= node.text.replace('@doc-title','') elif '@doc-overview' in node.text: label = 'overview' text", "# Add indents to functions, and break into convenient lines.", "of text lines, with labelling and formatting applied. Expects the", "increment per loop. i += 1 # Title to put", "printed directly. if key == 'title': title = text.strip() continue", "/ 'Readme.md').read_text().splitlines() # # Generate a bbcode version, prefixing with", "of cues or functions. ''' # Transfer to annotated/indented lines.", "Support for generating documentation readmes for the extensions. Extracts from", "(release_dir / (doc_dir.name + '_bb_readme.txt')).write_text('\\n'.join(bb_lines)) # return if __name__ ==", "functions are lumped together in one comment. elif '@doc-functions' in", "= node.text.replace('@doc-overview','') elif '@doc-section' in node.text: label = 'section' text", "for decorators. for comment in comment_blocks: # Handle title declarations.", "Sections_To_Lines(doc_text_sections): ''' Converts a dict of {section label: text} to", "the list. # Looking for decorated comments. for node in", "processing. x4_customizer_dir = str(project_dir.parent / 'X4_Customizer') if x4_customizer_dir not in", "title = '' # List of tuples of (label, text)", "the title and continue; this isn't printed directly. if key", "text) hold the extracted text lines. doc_text_sections = [] #", "(if any). # (Note: this function not included in the", "# Otherwise these are md cues. else: indent = '", "etree import sys from itertools import chain project_dir = Path(__file__).resolve().parents[1]", "the content.xml files. spec.Update_Content_Version() # Make each of the doc", "Update all of the content.xml files. spec.Update_Content_Version() # Make each", "txt file. # Output is placed in the release folder.", "a bit. text = Merge_Lines(text) # Add indents to functions,", "Transfer to annotated/indented lines. functions_started = False title = ''", "= []): # ''' # Turn the ext_dir's readme into", "Handle title declarations. if '@doc-title' in comment: label = 'title'", "sys.path: sys.path.append(x4_customizer_dir) from Framework.Make_Documentation import Merge_Lines #from Framework.Make_Documentation import Get_BB_Text", "any). 
# (Note: this function not included in the class", "non-comments. # Kinda awkward how lxml checks this (isinstance doesn't", "key, text in doc_text_sections: # Extract the title and continue;", "Stick a label line when starting the function section. if", "all functions are lumped together in one comment. elif '@doc-functions'", "a list of lines holding the documentation extracted from a", "confusion. elif this_line.startswith('--'): comment_blocks.append(this_line.replace('--','')) # Always one increment per loop.", "= str(project_dir.parent / 'X4_Customizer') if x4_customizer_dir not in sys.path: sys.path.append(x4_customizer_dir)", "' # Stick a label line when starting the function", "prefixing with custom header. # bb_lines = header_lines + Get_BB_Text(doc_lines)", "+= Get_XML_Cue_Text(file_path) elif file_path.suffix == '.lua': doc_lines += Get_Lua_Text(file_path) with", "# Record for output. ret_text_lines += text_lines return ret_text_lines def", "Generate a bbcode version, prefixing with custom header. # bb_lines", "a decorated lua file. ''' text = lua_path.read_text() ret_text_lines =", "Extract non-indented comments. # TODO: maybe regex this. comment_blocks =", "'overview' text = comment.replace('@doc-overview','') # For now, all functions are", "[] lua_lines = text.splitlines() i = 0 while i <", "applied. Expects the input to start with a 'title', then", "elif file_path.suffix == '.lua': doc_lines += Get_Lua_Text(file_path) with open(doc_path, 'w')", "gets an 'overview' label. if key == 'overview': ret_text_lines +=", "(label, text) hold the extracted text lines. doc_text_sections = []", "# # # Grab the readme contents. # doc_lines =", "functions_started = False title = '' ret_text_lines = [] for", "= Merge_Lines(text) # Add indents to functions, and break into", "the doc. while i < len(lua_lines): next_line = lua_lines[i] if", "these_lines = [] # Record the first line. these_lines.append(this_line.replace('--[[','')) i", "one comment. elif '@doc-functions' in comment: label = 'functions' text", "docs on the forum. #def Make_BB_Code(doc_dir, header_lines = []): #", "i < len(lua_lines): this_line = lua_lines[i] if this_line.startswith('--[['): # Scan", "comments looking for decorators. for comment in comment_blocks: # Handle", "for all files. doc_lines = [] for file_path in file_list:", "== '.xml': doc_lines += Get_XML_Cue_Text(file_path) elif file_path.suffix == '.lua': doc_lines", "now, all functions are lumped together in one comment. elif", "node.text: label = 'title' text = node.text.replace('@doc-title','') elif '@doc-overview' in", "Process the text a bit. text = Merge_Lines(text) # Add", "to the end of the doc. while i < len(lua_lines):", "comment.replace('@doc-overview','') # For now, all functions are lumped together in", "# For now, all functions are lumped together in one", "line for line in text.splitlines()] # Record for output. ret_text_lines", "decorated lua block comments and xml comments. ''' from pathlib", "for key, text in doc_text_sections: # Extract the title and", "function section. if not functions_started: functions_started = True ret_text_lines +=", "text lines, with labelling and formatting applied. Expects the input", "Make_BB_Code(doc_dir, header_lines = []): # ''' # Turn the ext_dir's", "into lines and return. return Sections_To_Lines(doc_text_sections) #-Removed; generally avoiding putting", "the text a bit. text = Merge_Lines(text) # Add indents", "= lua_lines[i] if next_line.startswith(']]'): # Found the last line; skip", "cues or functions. 
''' # Transfer to annotated/indented lines. functions_started", "functions_started = True ret_text_lines += ['', '### {} Cues'.format(title), '']", "{} Cues'.format(title), ''] # Bullet the function name. ret_text_lines.append('* **{}**'.format(key))", "from a decorated MD xml file. ''' # List of", "label = 'title' text = comment.replace('@doc-title','') # Text blocks are", "'overview': ret_text_lines += ['', '### {} Overview'.format(title), ''] indent =", "in the list. # Looking for decorated comments. for node", "text = node.text.replace('@doc-section','') elif '@doc-cue' in node.text: label = node.getnext().get('name')", "from Release_Specs import release_specs def Make(): for spec in release_specs:", "= True ret_text_lines += ['', '### {} Cues'.format(title), ''] #", "convenient lines. text_lines = [indent + line for line in", "the closing ]]. these_lines = [] # Record the first", "lines holding the documentation extracted from a decorated MD xml", "the ext_dir's readme into a bbcode txt file. # Output", "= [] for key, text in doc_text_sections: # Extract the", "main docs on the forum. #def Make_BB_Code(doc_dir, header_lines = []):", "text = Merge_Lines(text) # Add indents to functions, and break", "= [indent + line for line in text.splitlines()] # Record", "and return. return Sections_To_Lines(doc_text_sections) #-Removed; generally avoiding putting main docs", "file: file.write('\\n'.join(doc_lines)) return def Sections_To_Lines(doc_text_sections): ''' Converts a dict of", "continue; this isn't printed directly. if key == 'title': title", "node.text: label = node.getnext().get('name') text = node.text.replace('@doc-cue','') else: # Unwanted", "'@doc-functions' in comment: label = 'functions' text = comment.replace('@doc-functions','') else:", "content.xml files. spec.Update_Content_Version() # Make each of the doc files", "# -- confusion. elif this_line.startswith('--'): comment_blocks.append(this_line.replace('--','')) # Always one increment", "up an import from the customizer for some text processing.", "'### {} Cues'.format(title), ''] # Bullet the function name. ret_text_lines.append('*", "a series of names of cues or functions. ''' #", "# Kinda awkward how lxml checks this (isinstance doesn't work).", "comments and xml comments. ''' from pathlib import Path from", "of {section label: text} to a list of text lines,", "may be multiple. elif key == 'section': ret_text_lines += ['','']", "class methods to avoid # import issues with the text", "in node.text: label = 'section' text = node.text.replace('@doc-section','') elif '@doc-cue'", "if next_line.startswith(']]'): # Found the last line; skip it. break", "for comment in comment_blocks: # Handle title declarations. if '@doc-title'", "# ''' # Turn the ext_dir's readme into a bbcode", "= spec.root_path / rel_path # Get lines for all files.", "tree.xpath('/*/cues')[0] # Stride through comments/cues in the list. # Looking", "-- confusion. elif this_line.startswith('--'): comment_blocks.append(this_line.replace('--','')) # Always one increment per", "Extract documentation text from a decorated lua file. ''' text", "# Header gets an 'overview' label. if key == 'overview':", "of (label, text) hold the extracted text lines. doc_text_sections =", "not in sys.path: sys.path.append(x4_customizer_dir) from Framework.Make_Documentation import Merge_Lines #from Framework.Make_Documentation", "functions, and break into convenient lines. text_lines = [indent +", "text from a decorated lua file. 
''' text = lua_path.read_text()", "next_line = lua_lines[i] if next_line.startswith(']]'): # Found the last line;", "Title to put on label lines. # Starts blank, filled", "in sys.path: sys.path.append(x4_customizer_dir) from Framework.Make_Documentation import Merge_Lines #from Framework.Make_Documentation import", "lua_lines[i] if next_line.startswith(']]'): # Found the last line; skip it.", "doc_lines = [] for file_path in file_list: if file_path.suffix ==", "True ret_text_lines += ['', '### {} Cues'.format(title), ''] # Bullet", "[indent + line for line in text.splitlines()] # Record for", "labelling and formatting applied. Expects the input to start with", "comment_blocks: # Handle title declarations. if '@doc-title' in comment: label", "return Sections_To_Lines(doc_text_sections) def Get_Lua_Text(lua_path): ''' Extract documentation text from a", "< len(lua_lines): this_line = lua_lines[i] if this_line.startswith('--[['): # Scan until", "file_path in file_list: if file_path.suffix == '.xml': doc_lines += Get_XML_Cue_Text(file_path)", "the end of the doc. while i < len(lua_lines): next_line", "Get_Lua_Text(lua_path): ''' Extract documentation text from a decorated lua file.", "up the full path. doc_path = spec.root_path / rel_path #", "'Release' # if not release_dir.exists(): # release_dir.mkdir() # # #", "= False title = '' ret_text_lines = [] for key,", "some text processing. x4_customizer_dir = str(project_dir.parent / 'X4_Customizer') if x4_customizer_dir", "indent = ' ' # Stick a label line when", "''' # Transfer to annotated/indented lines. functions_started = False title", "from itertools import chain project_dir = Path(__file__).resolve().parents[1] # Set up", "awkward how lxml checks this (isinstance doesn't work). if node.tag", "= text.splitlines() i = 0 while i < len(lua_lines): this_line", "Merge_Lines #from Framework.Make_Documentation import Get_BB_Text # Grab the project specifications.", "it. break these_lines.append(next_line) i += 1 comment_blocks.append('\\n'.join(these_lines)) # Check single-line", "the documentation extracted from a decorated MD xml file. '''", "in doc_text_sections: # Extract the title and continue; this isn't", "doc_lines += Get_Lua_Text(file_path) with open(doc_path, 'w') as file: file.write('\\n'.join(doc_lines)) return", "on the forum. #def Make_BB_Code(doc_dir, header_lines = []): # '''", "customizer for some text processing. x4_customizer_dir = str(project_dir.parent / 'X4_Customizer')", "= 'title' text = node.text.replace('@doc-title','') elif '@doc-overview' in node.text: label", "= 'functions' text = comment.replace('@doc-functions','') else: # Unwanted comment; skip.", "Set up an import from the customizer for some text", "file_list: if file_path.suffix == '.xml': doc_lines += Get_XML_Cue_Text(file_path) elif file_path.suffix", "Grab the project specifications. from Release_Specs import release_specs def Make():", "''] # Bullet the function name. ret_text_lines.append('* **{}**'.format(key)) # Process", "is not etree.Comment: continue # Handle title declarations. if '@doc-title'", "label = node.getnext().get('name') text = node.text.replace('@doc-cue','') else: # Unwanted comment;", "'Readme.md').read_text().splitlines() # # Generate a bbcode version, prefixing with custom", "text a bit. 
text = Merge_Lines(text) # Add indents to", "doc_text_sections = [] # Go through the comments looking for", "+ Get_BB_Text(doc_lines) # (release_dir / (doc_dir.name + '_bb_readme.txt')).write_text('\\n'.join(bb_lines)) # return", "''' Support for generating documentation readmes for the extensions. Extracts", "continue # Record it. doc_text_sections.append((label, text)) # Process into lines", "node.text: label = 'overview' text = node.text.replace('@doc-overview','') elif '@doc-section' in", "release_specs def Make(): for spec in release_specs: # Update all", "isn't printed directly. if key == 'title': title = text.strip()", "chain(root.iterchildren(), cues.iterchildren()): # Skip non-comments. # Kinda awkward how lxml", "elif '@doc-section' in node.text: label = 'section' text = node.text.replace('@doc-section','')", "import release_specs def Make(): for spec in release_specs: # Update", "continue # Header gets an 'overview' label. if key ==", "= text.strip() continue # Header gets an 'overview' label. if", "if node.tag is not etree.Comment: continue # Handle title declarations.", "hold the extracted text lines. doc_text_sections = [] # Go", "'overview' label. if key == 'overview': ret_text_lines += ['', '###", "Kinda awkward how lxml checks this (isinstance doesn't work). if", "# Process into lines and return. return Sections_To_Lines(doc_text_sections) #-Removed; generally", "= '' # Lua functions are in one lump, like", "Record for output. ret_text_lines += text_lines return ret_text_lines def Get_XML_Cue_Text(xml_path):", "= node.text.replace('@doc-section','') elif '@doc-cue' in node.text: label = node.getnext().get('name') text", "'.xml': doc_lines += Get_XML_Cue_Text(file_path) elif file_path.suffix == '.lua': doc_lines +=", "doc. while i < len(lua_lines): next_line = lua_lines[i] if next_line.startswith(']]'):", "in chain(root.iterchildren(), cues.iterchildren()): # Skip non-comments. # Kinda awkward how", "lines. doc_text_sections = [] # Read the xml and pick", "elif '@doc-cue' in node.text: label = node.getnext().get('name') text = node.text.replace('@doc-cue','')", "lines. # Starts blank, filled by decorator. title = ''", "in comment_blocks: # Handle title declarations. if '@doc-title' in comment:", "text processing. x4_customizer_dir = str(project_dir.parent / 'X4_Customizer') if x4_customizer_dir not", "''' Converts a dict of {section label: text} to a", "start with a 'title', then 'overview', then a series of", "Cues'.format(title), ''] # Bullet the function name. ret_text_lines.append('* **{}**'.format(key)) #", "spec.root_path / rel_path # Get lines for all files. doc_lines", "import Get_BB_Text # Grab the project specifications. from Release_Specs import", "'' # Lua functions are in one lump, like overview.", "len(lua_lines): this_line = lua_lines[i] if this_line.startswith('--[['): # Scan until the", "text = lua_path.read_text() ret_text_lines = [] # Extract non-indented comments.", "Converts a dict of {section label: text} to a list", "[] # Go through the comments looking for decorators. for", "cues = tree.xpath('/*/cues')[0] # Stride through comments/cues in the list.", "i += 1 # Only search to the end of", "Found the last line; skip it. break these_lines.append(next_line) i +=", "# Handle title declarations. if '@doc-title' in comment: label =", "# Unwanted comment; skip. continue # Record it. doc_text_sections.append((label, text))", "search to the end of the doc. 
while i <", "elif this_line.startswith('--'): comment_blocks.append(this_line.replace('--','')) # Always one increment per loop. i", "comments. for node in chain(root.iterchildren(), cues.iterchildren()): # Skip non-comments. #", "import issues with the text helper functions below.) for rel_path,", "lump, like overview. elif key == 'functions': ret_text_lines += ['',", "a label line when starting the function section. if not", "Skip non-comments. # Kinda awkward how lxml checks this (isinstance", "'section': ret_text_lines += ['',''] indent = '' # Otherwise these", "bb_lines = header_lines + Get_BB_Text(doc_lines) # (release_dir / (doc_dir.name +", "label: text} to a list of text lines, with labelling", "for output. ret_text_lines += text_lines return ret_text_lines def Get_XML_Cue_Text(xml_path): '''", "these are md cues. else: indent = ' ' #", "for generating documentation readmes for the extensions. Extracts from decorated", "the last line; skip it. break these_lines.append(next_line) i += 1", "all files. doc_lines = [] for file_path in file_list: if", "for file_path in file_list: if file_path.suffix == '.xml': doc_lines +=", "= comment.replace('@doc-title','') # Text blocks are either overview or cue.", "the readme contents. # doc_lines = (doc_dir / 'Readme.md').read_text().splitlines() #", "avoiding putting main docs on the forum. #def Make_BB_Code(doc_dir, header_lines", "'' ret_text_lines = [] for key, text in doc_text_sections: #", "''' from pathlib import Path from lxml import etree import", "doesn't work). if node.tag is not etree.Comment: continue # Handle", "not etree.Comment: continue # Handle title declarations. if '@doc-title' in", "bbcode version, prefixing with custom header. # bb_lines = header_lines", "text = node.text.replace('@doc-cue','') else: # Unwanted comment; skip. continue #", "itertools import chain project_dir = Path(__file__).resolve().parents[1] # Set up an", "'title': title = text.strip() continue # Header gets an 'overview'", "block comments, to avoid # -- confusion. elif this_line.startswith('--'): comment_blocks.append(this_line.replace('--',''))", "< len(lua_lines): next_line = lua_lines[i] if next_line.startswith(']]'): # Found the", "if '@doc-title' in node.text: label = 'title' text = node.text.replace('@doc-title','')", "decorated MD xml file. ''' # List of tuples of", "Output is placed in the release folder. # ''' #", "the extensions. Extracts from decorated lua block comments and xml", "of the content.xml files. spec.Update_Content_Version() # Make each of the", "the text helper functions below.) for rel_path, file_list in spec.doc_specs.items():", "work). if node.tag is not etree.Comment: continue # Handle title", "lua file. ''' text = lua_path.read_text() ret_text_lines = [] #", "import sys from itertools import chain project_dir = Path(__file__).resolve().parents[1] #", "helper functions below.) for rel_path, file_list in spec.doc_specs.items(): # Set", "as file: file.write('\\n'.join(doc_lines)) return def Sections_To_Lines(doc_text_sections): ''' Converts a dict", "section. if not functions_started: functions_started = True ret_text_lines += ['',", "node in chain(root.iterchildren(), cues.iterchildren()): # Skip non-comments. # Kinda awkward", "the function section. if not functions_started: functions_started = True ret_text_lines", "doc_text_sections = [] # Read the xml and pick out", "this isn't printed directly. if key == 'title': title =", "and xml comments. 
''' from pathlib import Path from lxml", "= project_dir / 'Release' # if not release_dir.exists(): # release_dir.mkdir()", "# Make each of the doc files (if any). #", "'title', then 'overview', then a series of names of cues", "else: indent = ' ' # Stick a label line", "release folder. # ''' # release_dir = project_dir / 'Release'", "input to start with a 'title', then 'overview', then a", "return. return Sections_To_Lines(doc_text_sections) #-Removed; generally avoiding putting main docs on", "# Read the xml and pick out the cues. tree", "# Process the text a bit. text = Merge_Lines(text) #", "i < len(lua_lines): next_line = lua_lines[i] if next_line.startswith(']]'): # Found", "''' # List of tuples of (label, text) hold the", "text)) # Process into lines and return. return Sections_To_Lines(doc_text_sections) def", "title declarations. if '@doc-title' in comment: label = 'title' text", "import from the customizer for some text processing. x4_customizer_dir =", "from the customizer for some text processing. x4_customizer_dir = str(project_dir.parent", "doc_path = spec.root_path / rel_path # Get lines for all", "lua block comments and xml comments. ''' from pathlib import", "# Check single-line comments after block comments, to avoid #", "comment in comment_blocks: # Handle title declarations. if '@doc-title' in", "# (release_dir / (doc_dir.name + '_bb_readme.txt')).write_text('\\n'.join(bb_lines)) # return if __name__", "import Merge_Lines #from Framework.Make_Documentation import Get_BB_Text # Grab the project", "file. ''' text = lua_path.read_text() ret_text_lines = [] # Extract", "# Title to put on label lines. # Starts blank,", "this (isinstance doesn't work). if node.tag is not etree.Comment: continue", "single-line comments after block comments, to avoid # -- confusion.", "bbcode txt file. # Output is placed in the release", "str(project_dir.parent / 'X4_Customizer') if x4_customizer_dir not in sys.path: sys.path.append(x4_customizer_dir) from", "/ 'X4_Customizer') if x4_customizer_dir not in sys.path: sys.path.append(x4_customizer_dir) from Framework.Make_Documentation", "these_lines.append(next_line) i += 1 comment_blocks.append('\\n'.join(these_lines)) # Check single-line comments after", "= 0 while i < len(lua_lines): this_line = lua_lines[i] if", "= 'title' text = comment.replace('@doc-title','') # Text blocks are either", "= comment.replace('@doc-overview','') # For now, all functions are lumped together", "# # Generate a bbcode version, prefixing with custom header.", "the doc files (if any). # (Note: this function not", "['', '### {} Overview'.format(title), ''] indent = '' # Lua", "it. doc_text_sections.append((label, text)) # Process into lines and return. return", "files. doc_lines = [] for file_path in file_list: if file_path.suffix", "through comments/cues in the list. # Looking for decorated comments.", "doc files (if any). # (Note: this function not included", "Returns a list of lines holding the documentation extracted from", "node.tag is not etree.Comment: continue # Handle title declarations. if", "either overview or cue. elif '@doc-overview' in comment: label =", "'overview' text = node.text.replace('@doc-overview','') elif '@doc-section' in node.text: label =", "Process into lines and return. return Sections_To_Lines(doc_text_sections) #-Removed; generally avoiding", "x4_customizer_dir = str(project_dir.parent / 'X4_Customizer') if x4_customizer_dir not in sys.path:", "checks this (isinstance doesn't work). 
if node.tag is not etree.Comment:", "= etree.parse(str(xml_path)) root = tree.xpath('/*')[0] cues = tree.xpath('/*/cues')[0] # Stride", "Add indents to functions, and break into convenient lines. text_lines", "into convenient lines. text_lines = [indent + line for line", "Otherwise these are md cues. else: indent = ' '", "if x4_customizer_dir not in sys.path: sys.path.append(x4_customizer_dir) from Framework.Make_Documentation import Merge_Lines", "indent = '' # Otherwise these are md cues. else:", "for the extensions. Extracts from decorated lua block comments and", "text)) # Process into lines and return. return Sections_To_Lines(doc_text_sections) #-Removed;", "text lines. doc_text_sections = [] # Read the xml and", "''] indent = '' # Lua functions are in one", "comment.replace('@doc-title','') # Text blocks are either overview or cue. elif", "with the text helper functions below.) for rel_path, file_list in", "'functions': ret_text_lines += ['', '### {} Functions'.format(title), ''] indent =", "in one comment. elif '@doc-functions' in comment: label = 'functions'", "''' # Turn the ext_dir's readme into a bbcode txt", "key == 'section': ret_text_lines += ['',''] indent = '' #", "''' # release_dir = project_dir / 'Release' # if not", "comment. elif '@doc-functions' in comment: label = 'functions' text =", "this_line.startswith('--'): comment_blocks.append(this_line.replace('--','')) # Always one increment per loop. i +=", "Check single-line comments after block comments, to avoid # --", "# ''' # release_dir = project_dir / 'Release' # if", "MD xml file. ''' # List of tuples of (label,", "Get_BB_Text # Grab the project specifications. from Release_Specs import release_specs", "decorators. for comment in comment_blocks: # Handle title declarations. if", "list of text lines, with labelling and formatting applied. Expects", "looking for decorators. for comment in comment_blocks: # Handle title", "specifications. from Release_Specs import release_specs def Make(): for spec in", "Grab the readme contents. # doc_lines = (doc_dir / 'Readme.md').read_text().splitlines()", "TODO: maybe regex this. comment_blocks = [] lua_lines = text.splitlines()", "be multiple. elif key == 'section': ret_text_lines += ['',''] indent", "# doc_lines = (doc_dir / 'Readme.md').read_text().splitlines() # # Generate a", "in file_list: if file_path.suffix == '.xml': doc_lines += Get_XML_Cue_Text(file_path) elif", "Process into lines and return. return Sections_To_Lines(doc_text_sections) def Get_Lua_Text(lua_path): '''", "'section' text = node.text.replace('@doc-section','') elif '@doc-cue' in node.text: label =", "functions below.) for rel_path, file_list in spec.doc_specs.items(): # Set up", "Get_Lua_Text(file_path) with open(doc_path, 'w') as file: file.write('\\n'.join(doc_lines)) return def Sections_To_Lines(doc_text_sections):", "''] indent = '' # Sections may be multiple. elif", "while i < len(lua_lines): next_line = lua_lines[i] if next_line.startswith(']]'): #", "#from Framework.Make_Documentation import Get_BB_Text # Grab the project specifications. from", "'' # Otherwise these are md cues. else: indent =", "extracted text lines. doc_text_sections = [] # Go through the", "etree.Comment: continue # Handle title declarations. if '@doc-title' in node.text:", "spec.Update_Content_Version() # Make each of the doc files (if any).", "(Note: this function not included in the class methods to", "indents to functions, and break into convenient lines. text_lines =", "Handle title declarations. 
'''
Make documentation readmes for the extensions.
Extracts from decorated lua block comments and xml comments.
'''
from pathlib import Path
from lxml import etree
import sys
from itertools import chain

project_dir = Path(__file__).resolve().parents[1]

# Set up an import from the customizer for some text processing.
x4_customizer_dir = str(project_dir.parent / 'X4_Customizer')
if x4_customizer_dir not in sys.path:
    sys.path.append(x4_customizer_dir)
# from Framework.Make_Documentation import ...  (imported names truncated in the source row)

# Output is placed in the release folder.
#'''
#release_dir = project_dir / 'Release'
#if not release_dir.exists():
#    release_dir.mkdir()
#
## Grab the readme contents.
#doc_lines = ...
#
## Make the bb version, prefixing with custom header.
#bb_lines = header_lines + Get_BB_Text(doc_lines)
#(release_dir / (doc_dir.name ...
#'''


def Make_Doc_Files(spec):  # function name is a placeholder; the original is not in this row
    # Make each of the doc files (if any).
    # (Note: this function ...  <- comment truncated in the source row)
    for rel_path, file_list in spec.doc_specs.items():
        # Set up the full path.
        doc_path = spec.root_path / rel_path
        # Get lines for all files.
        doc_lines = []
        for file_path in file_list:
            if file_path.suffix == '.xml':
                doc_lines += Get_XML_Cue_Text(file_path)
            elif file_path.suffix == '.lua':
                doc_lines += Get_Lua_Text(file_path)
        with open(doc_path, 'w') as file:
            file.write('\n'.join(doc_lines))
    return


def Get_XML_Cue_Text(xml_path):
    '''
    Returns a list of lines holding the documentation extracted
    from a decorated MD xml file.
    '''
    # List of tuples of (label, text) hold the extracted text lines.
    doc_text_sections = []

    # (parsing of the xml into root/cues nodes is truncated in the source row)
    # Stride through comments/cues in the list.
    # Looking for decorated comments.
    for node in chain(root.iterchildren(), cues.iterchildren()):
        # Skip non-comments.
        # Kinda awkward how lxml checks this ...
        # Title declarations.
        if '@doc-title' in node.text:
            label = 'title'
            text = node.text.replace('@doc-title','')
        elif '@doc-overview' in node.text:
            label = 'overview'
            text = node.text.replace('@doc-overview','')
        elif '@doc-section' in node.text:
            label = 'section'
            text = node.text.replace('@doc-section','')
        elif '@doc-cue' in node.text:
            ...  # (cue handling truncated in the source row)
        else:
            continue
        doc_text_sections.append((label, text))

    # Process into lines and return.
    return Sections_To_Lines(doc_text_sections)


def Get_Lua_Text(lua_path):
    '''
    Extract documentation text from a decorated lua file.
    '''
    lua_text = lua_path.read_text()
    ret_text_lines = []

    # Extract non-indented comments.
    # TODO: maybe regex this.
    comment_blocks = []
    lua_lines = lua_text.splitlines()
    i = 0
    while i < len(lua_lines):
        this_line = lua_lines[i]
        if this_line.startswith('--[['):
            # Scan until the closing ]].
            these_lines = []
            # Record the first line.
            these_lines.append(this_line.replace('--[[',''))
            i += 1
            # Only search to the end of the doc.
            while i < len(lua_lines):
                next_line = lua_lines[i]
                if next_line.startswith(']]'):
                    # Found the last line; skip it.
                    break
                these_lines.append(next_line)
                i += 1
            comment_blocks.append('\n'.join(these_lines))
        # Check single-line comments after block comments, to ...
        # (comment and handling truncated in the source row)
        # Always one increment per loop.
        i += 1

    # Title to put on label lines.
    # Starts blank, filled by decorator.
    title = ''
    # List of tuples of (label, text) hold the extracted text lines.
    doc_text_sections = []

    # Go through the comments looking for decorators.
    for comment in comment_blocks:
        # Title declarations.
        if '@doc-title' in comment:
            label = 'title'
            text = comment.replace('@doc-title','')
        # ... cue.  <- comment truncated in the source row
        elif '@doc-overview' in comment:
            label = 'overview'
            text = comment.replace('@doc-overview','')
        elif '@doc-functions' in comment:
            label = 'functions'
            text = comment.replace('@doc-functions','')
        else:
            # Unwanted comment; skip.
            continue
        doc_text_sections.append((label, text))

    # Process into lines and return.
    return Sections_To_Lines(doc_text_sections)


def Sections_To_Lines(doc_text_sections):
    '''
    Converts {section label: text} to a list of text lines, with
    ... (docstring truncated in the source row).
    Expects the input to start with a 'title', then 'overview', then
    a series of names of cues or functions.
    '''
    # Transfer to ...
    ret_text_lines = []
    title = ''
    functions_started = False
    for key, text in doc_text_sections:
        # Extract the title and continue; this isn't printed directly.
        if key == 'title':
            title = text.strip()
            continue
        # Add a label.
        if key == 'overview':
            ret_text_lines += ['', '### {} Overview'.format(title), '']
            indent = ''
        # Lua functions are ... overview.  <- comment truncated in the source row
        elif key == 'functions':
            ret_text_lines += ['', '### {} Functions'.format(title), '']
            indent = ''
        # Sections may be multiple.
        elif key == 'section':
            ret_text_lines += ['','']
            indent = ''
        # Otherwise these are md cues.
        else:
            indent = '  '
            # Stick a label line when starting the cues.
            if not functions_started:
                functions_started = True
                ret_text_lines += ['', '### {} Cues'.format(title), '']
            # Bullet the function name.
            ret_text_lines.append('* **{}**'.format(key))
        # Process the text a bit.
        text_lines = [indent + line for line in text.splitlines()]
        # Record for output.
        ret_text_lines += text_lines
    return ret_text_lines
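As a quick sanity check of the lua extraction above, the sketch below (not part of the original file; the file contents, names, and paths are made up for illustration) writes a minimal decorated lua file and prints what Get_Lua_Text recovers from it under the reconstruction shown:

# --- usage sketch, not part of the original file ---
from pathlib import Path
import tempfile

sample = '''--[[ @doc-title My Mod
]]
--[[ @doc-overview
Adds an example feature.
]]
'''
tmp = Path(tempfile.mkdtemp()) / 'example.lua'
tmp.write_text(sample)
# Expect a '### My Mod Overview' header followed by the overview text.
print('\n'.join(Get_Lua_Text(tmp)))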
[ "30 b = 17 print(\"Sum of a and b is\",a", "two numbers a = 30 b = 17 print(\"Sum of", "b = 17 print(\"Sum of a and b is\",a +", "a = 30 b = 17 print(\"Sum of a and", "= 17 print(\"Sum of a and b is\",a + b)", "= 30 b = 17 print(\"Sum of a and b", "#Addition of two numbers a = 30 b = 17", "numbers a = 30 b = 17 print(\"Sum of a", "of two numbers a = 30 b = 17 print(\"Sum" ]
[ "= QLabel(self) self.l1.setText('Clique em um botao') self.l1.move(50, 50) self.l1.setStyleSheet('QLabel{font: bold;", "self.carro = QLabel(self) self.carro.move(25, 200) self.carro.resize(450, 337) self.carro.setPixmap(QtGui.QPixmap('carro.jpg')) def gera_caixas_de_texto(self):", "font-weight: 600}') botao2.clicked.connect(self.b2) botao3 = QPushButton('Texto', self) botao3.move(500, 100) botao3.resize(100,", "self.carro.resize(450, 337) self.carro.setPixmap(QtGui.QPixmap('carro.jpg')) def gera_caixas_de_texto(self): self.caixa_texto = QLineEdit(self) self.caixa_texto.move(25, 10)", "black;} QPushButton:hover{ background: orange; font-weight: 600;}') botao1.clicked.connect(self.b1) botao2 = QPushButton('Botao", "texto/a4.py import sys from PyQt5.QtWidgets import (QApplication, QMainWindow, QPushButton, QToolTip,", "# botoes botao1 = QPushButton('Botao 1', self) botao1.move(100, 100) botao1.resize(100,", "from PyQt5.QtWidgets import (QApplication, QMainWindow, QPushButton, QToolTip, QLabel, QLineEdit) from", "QPushButton('Botao 1', self) botao1.move(100, 100) botao1.resize(100, 50) botao1.setStyleSheet( 'QPushButton{background-color: white;", "'Primeira janela' self.gera_labels() self.gera_botoes() self.gera_imagens() self.gera_caixas_de_texto() def carregar_janela(self): self.setGeometry(self.esquerda, self.topo,", "') self.l2.move(300, 30) self.l2.resize(260, 50) self.l2.setStyleSheet('QLabel{font: bold; font-size: 30px;}') def", "bold; font-size: 30px;}') def gera_imagens(self): self.carro = QLabel(self) self.carro.move(25, 200)", "2', self) botao2.move(300, 100) botao2.resize(100, 50) botao2.setStyleSheet( 'QPushButton{background-color: blue; color:", "def carregar_janela(self): self.setGeometry(self.esquerda, self.topo, self.largura, self.altura) self.setWindowTitle(self.titulo) self.show() def gera_botoes(self):", "QLabel(self) self.carro.move(25, 200) self.carro.resize(450, 337) self.carro.setPixmap(QtGui.QPixmap('carro.jpg')) def gera_caixas_de_texto(self): self.caixa_texto =", "botao3.setStyleSheet('QPushButton{background-color: black; color: white;} QPushButton:hover{ background: orange; font-weight: 600}') botao3.clicked.connect(self.b3)", "20px;}') self.l1.resize(250, 50) self.l2 = QLabel(self) self.l2.setText('Digitou: ') self.l2.move(300, 30)", "import QtGui class Janela(QMainWindow): def __init__(self): super().__init__() self.topo = 50", "'QPushButton{background-color: blue; color: white;} QPushButton:hover{ background: orange; font-weight: 600}') botao2.clicked.connect(self.b2)", "color: black;} QPushButton:hover{ background: orange; font-weight: 600;}') botao1.clicked.connect(self.b1) botao2 =", "__name__ == '__main__': app = QApplication(sys.argv) janela = Janela() janela.carregar_janela()", "= QPushButton('Texto', self) botao3.move(500, 100) botao3.resize(100, 50) botao3.setStyleSheet('QPushButton{background-color: black; color:", "def b3(self): conteudo = self.caixa_texto.text() self.l2.setText('Digitou: {}'.format(conteudo)) if __name__ ==", "gera_labels(self): self.l1 = QLabel(self) self.l1.setText('Clique em um botao') self.l1.move(50, 50)", "QLabel, QLineEdit) from PyQt5 import QtGui class Janela(QMainWindow): def __init__(self):", "em um botao') self.l1.move(50, 50) self.l1.setStyleSheet('QLabel{font: bold; font-size: 20px;}') self.l1.resize(250,", "PyQt5.QtWidgets import (QApplication, QMainWindow, QPushButton, QToolTip, QLabel, QLineEdit) from PyQt5", "QPushButton('Texto', self) botao3.move(500, 100) botao3.resize(100, 50) 
botao3.setStyleSheet('QPushButton{background-color: black; color: white;}", "if __name__ == '__main__': app = QApplication(sys.argv) janela = Janela()", "self.esquerda = 50 self.largura = 800 self.altura = 600 self.titulo", "600 self.titulo = 'Primeira janela' self.gera_labels() self.gera_botoes() self.gera_imagens() self.gera_caixas_de_texto() def", "bold; font-size: 20px;}') self.l1.resize(250, 50) self.l2 = QLabel(self) self.l2.setText('Digitou: ')", "50 self.largura = 800 self.altura = 600 self.titulo = 'Primeira", "30) self.l2.resize(260, 50) self.l2.setStyleSheet('QLabel{font: bold; font-size: 30px;}') def gera_imagens(self): self.carro", "import (QApplication, QMainWindow, QPushButton, QToolTip, QLabel, QLineEdit) from PyQt5 import", "800 self.altura = 600 self.titulo = 'Primeira janela' self.gera_labels() self.gera_botoes()", "337) self.carro.setPixmap(QtGui.QPixmap('carro.jpg')) def gera_caixas_de_texto(self): self.caixa_texto = QLineEdit(self) self.caixa_texto.move(25, 10) self.caixa_texto.resize(150,", "botao3.clicked.connect(self.b3) def gera_labels(self): self.l1 = QLabel(self) self.l1.setText('Clique em um botao')", "def gera_labels(self): self.l1 = QLabel(self) self.l1.setText('Clique em um botao') self.l1.move(50,", "50) def b1(self): # forma 1 self.carro.setPixmap(QtGui.QPixmap('carro.jpg')) def b2(self, l):", "600}') botao3.clicked.connect(self.b3) def gera_labels(self): self.l1 = QLabel(self) self.l1.setText('Clique em um", "self.gera_botoes() self.gera_imagens() self.gera_caixas_de_texto() def carregar_janela(self): self.setGeometry(self.esquerda, self.topo, self.largura, self.altura) self.setWindowTitle(self.titulo)", "self.setGeometry(self.esquerda, self.topo, self.largura, self.altura) self.setWindowTitle(self.titulo) self.show() def gera_botoes(self): # botoes", "self.carro.setPixmap(QtGui.QPixmap('carro.jpg')) def gera_caixas_de_texto(self): self.caixa_texto = QLineEdit(self) self.caixa_texto.move(25, 10) self.caixa_texto.resize(150, 50)", "self.setWindowTitle(self.titulo) self.show() def gera_botoes(self): # botoes botao1 = QPushButton('Botao 1',", "botao1.resize(100, 50) botao1.setStyleSheet( 'QPushButton{background-color: white; color: black;} QPushButton:hover{ background: orange;", "def gera_botoes(self): # botoes botao1 = QPushButton('Botao 1', self) botao1.move(100,", "QToolTip, QLabel, QLineEdit) from PyQt5 import QtGui class Janela(QMainWindow): def", "color: white;} QPushButton:hover{ background: orange; font-weight: 600}') botao3.clicked.connect(self.b3) def gera_labels(self):", "1', self) botao1.move(100, 100) botao1.resize(100, 50) botao1.setStyleSheet( 'QPushButton{background-color: white; color:", "self.gera_labels() self.gera_botoes() self.gera_imagens() self.gera_caixas_de_texto() def carregar_janela(self): self.setGeometry(self.esquerda, self.topo, self.largura, self.altura)", "self.carro.move(25, 200) self.carro.resize(450, 337) self.carro.setPixmap(QtGui.QPixmap('carro.jpg')) def gera_caixas_de_texto(self): self.caixa_texto = QLineEdit(self)", "= QLineEdit(self) self.caixa_texto.move(25, 10) self.caixa_texto.resize(150, 50) def b1(self): # forma", "self) botao1.move(100, 100) botao1.resize(100, 50) botao1.setStyleSheet( 'QPushButton{background-color: white; color: black;}", "1 self.carro.setPixmap(QtGui.QPixmap('carro.jpg')) def b2(self, l): # forma 2 self.carro.setPixmap(QtGui.QPixmap('carro2.jpg')) def", "50) botao3.setStyleSheet('QPushButton{background-color: black; color: white;} QPushButton:hover{ background: orange; font-weight: 600}')", 
"black; color: white;} QPushButton:hover{ background: orange; font-weight: 600}') botao3.clicked.connect(self.b3) def", "conteudo = self.caixa_texto.text() self.l2.setText('Digitou: {}'.format(conteudo)) if __name__ == '__main__': app", "sys from PyQt5.QtWidgets import (QApplication, QMainWindow, QPushButton, QToolTip, QLabel, QLineEdit)", "self.l1.move(50, 50) self.l1.setStyleSheet('QLabel{font: bold; font-size: 20px;}') self.l1.resize(250, 50) self.l2 =", "orange; font-weight: 600}') botao2.clicked.connect(self.b2) botao3 = QPushButton('Texto', self) botao3.move(500, 100)", "background: orange; font-weight: 600}') botao2.clicked.connect(self.b2) botao3 = QPushButton('Texto', self) botao3.move(500,", "botao') self.l1.move(50, 50) self.l1.setStyleSheet('QLabel{font: bold; font-size: 20px;}') self.l1.resize(250, 50) self.l2", "QPushButton:hover{ background: orange; font-weight: 600}') botao3.clicked.connect(self.b3) def gera_labels(self): self.l1 =", "botao1.setStyleSheet( 'QPushButton{background-color: white; color: black;} QPushButton:hover{ background: orange; font-weight: 600;}')", "font-size: 30px;}') def gera_imagens(self): self.carro = QLabel(self) self.carro.move(25, 200) self.carro.resize(450,", "color: white;} QPushButton:hover{ background: orange; font-weight: 600}') botao2.clicked.connect(self.b2) botao3 =", "self.altura) self.setWindowTitle(self.titulo) self.show() def gera_botoes(self): # botoes botao1 = QPushButton('Botao", "100) botao2.resize(100, 50) botao2.setStyleSheet( 'QPushButton{background-color: blue; color: white;} QPushButton:hover{ background:", "self.l1.setText('Clique em um botao') self.l1.move(50, 50) self.l1.setStyleSheet('QLabel{font: bold; font-size: 20px;}')", "botao2.move(300, 100) botao2.resize(100, 50) botao2.setStyleSheet( 'QPushButton{background-color: blue; color: white;} QPushButton:hover{", "self.l1 = QLabel(self) self.l1.setText('Clique em um botao') self.l1.move(50, 50) self.l1.setStyleSheet('QLabel{font:", "== '__main__': app = QApplication(sys.argv) janela = Janela() janela.carregar_janela() sys.exit(app.exec_())", "self.gera_caixas_de_texto() def carregar_janela(self): self.setGeometry(self.esquerda, self.topo, self.largura, self.altura) self.setWindowTitle(self.titulo) self.show() def", "botao1.move(100, 100) botao1.resize(100, 50) botao1.setStyleSheet( 'QPushButton{background-color: white; color: black;} QPushButton:hover{", "from PyQt5 import QtGui class Janela(QMainWindow): def __init__(self): super().__init__() self.topo", "# forma 2 self.carro.setPixmap(QtGui.QPixmap('carro2.jpg')) def b3(self): conteudo = self.caixa_texto.text() self.l2.setText('Digitou:", "= 800 self.altura = 600 self.titulo = 'Primeira janela' self.gera_labels()", "QLineEdit) from PyQt5 import QtGui class Janela(QMainWindow): def __init__(self): super().__init__()", "font-weight: 600;}') botao1.clicked.connect(self.b1) botao2 = QPushButton('Botao 2', self) botao2.move(300, 100)", "self.largura, self.altura) self.setWindowTitle(self.titulo) self.show() def gera_botoes(self): # botoes botao1 =", "QLabel(self) self.l2.setText('Digitou: ') self.l2.move(300, 30) self.l2.resize(260, 50) self.l2.setStyleSheet('QLabel{font: bold; font-size:", "um botao') self.l1.move(50, 50) self.l1.setStyleSheet('QLabel{font: bold; font-size: 20px;}') self.l1.resize(250, 50)", "botao3.resize(100, 50) botao3.setStyleSheet('QPushButton{background-color: black; color: white;} QPushButton:hover{ background: orange; font-weight:", "self.show() def gera_botoes(self): # botoes botao1 = 
QPushButton('Botao 1', self)", "botoes botao1 = QPushButton('Botao 1', self) botao1.move(100, 100) botao1.resize(100, 50)", "orange; font-weight: 600}') botao3.clicked.connect(self.b3) def gera_labels(self): self.l1 = QLabel(self) self.l1.setText('Clique", "gera_imagens(self): self.carro = QLabel(self) self.carro.move(25, 200) self.carro.resize(450, 337) self.carro.setPixmap(QtGui.QPixmap('carro.jpg')) def", "600}') botao2.clicked.connect(self.b2) botao3 = QPushButton('Texto', self) botao3.move(500, 100) botao3.resize(100, 50)", "50) botao2.setStyleSheet( 'QPushButton{background-color: blue; color: white;} QPushButton:hover{ background: orange; font-weight:", "botao3 = QPushButton('Texto', self) botao3.move(500, 100) botao3.resize(100, 50) botao3.setStyleSheet('QPushButton{background-color: black;", "= QPushButton('Botao 1', self) botao1.move(100, 100) botao1.resize(100, 50) botao1.setStyleSheet( 'QPushButton{background-color:", "font-size: 20px;}') self.l1.resize(250, 50) self.l2 = QLabel(self) self.l2.setText('Digitou: ') self.l2.move(300,", "def __init__(self): super().__init__() self.topo = 50 self.esquerda = 50 self.largura", "super().__init__() self.topo = 50 self.esquerda = 50 self.largura = 800", "self.largura = 800 self.altura = 600 self.titulo = 'Primeira janela'", "self.topo = 50 self.esquerda = 50 self.largura = 800 self.altura", "self.l2 = QLabel(self) self.l2.setText('Digitou: ') self.l2.move(300, 30) self.l2.resize(260, 50) self.l2.setStyleSheet('QLabel{font:", "botao3.move(500, 100) botao3.resize(100, 50) botao3.setStyleSheet('QPushButton{background-color: black; color: white;} QPushButton:hover{ background:", "white;} QPushButton:hover{ background: orange; font-weight: 600}') botao3.clicked.connect(self.b3) def gera_labels(self): self.l1", "200) self.carro.resize(450, 337) self.carro.setPixmap(QtGui.QPixmap('carro.jpg')) def gera_caixas_de_texto(self): self.caixa_texto = QLineEdit(self) self.caixa_texto.move(25,", "def gera_imagens(self): self.carro = QLabel(self) self.carro.move(25, 200) self.carro.resize(450, 337) self.carro.setPixmap(QtGui.QPixmap('carro.jpg'))", "self.gera_imagens() self.gera_caixas_de_texto() def carregar_janela(self): self.setGeometry(self.esquerda, self.topo, self.largura, self.altura) self.setWindowTitle(self.titulo) self.show()", "QPushButton('Botao 2', self) botao2.move(300, 100) botao2.resize(100, 50) botao2.setStyleSheet( 'QPushButton{background-color: blue;", "10) self.caixa_texto.resize(150, 50) def b1(self): # forma 1 self.carro.setPixmap(QtGui.QPixmap('carro.jpg')) def", "self.l2.resize(260, 50) self.l2.setStyleSheet('QLabel{font: bold; font-size: 30px;}') def gera_imagens(self): self.carro =", "gera_caixas_de_texto(self): self.caixa_texto = QLineEdit(self) self.caixa_texto.move(25, 10) self.caixa_texto.resize(150, 50) def b1(self):", "self.topo, self.largura, self.altura) self.setWindowTitle(self.titulo) self.show() def gera_botoes(self): # botoes botao1", "def b2(self, l): # forma 2 self.carro.setPixmap(QtGui.QPixmap('carro2.jpg')) def b3(self): conteudo", "self.l2.setText('Digitou: {}'.format(conteudo)) if __name__ == '__main__': app = QApplication(sys.argv) janela", "self.titulo = 'Primeira janela' self.gera_labels() self.gera_botoes() self.gera_imagens() self.gera_caixas_de_texto() def carregar_janela(self):", "__init__(self): super().__init__() self.topo = 50 self.esquerda = 50 self.largura =", "= 'Primeira janela' self.gera_labels() self.gera_botoes() self.gera_imagens() self.gera_caixas_de_texto() def carregar_janela(self): 
self.setGeometry(self.esquerda,", "{}'.format(conteudo)) if __name__ == '__main__': app = QApplication(sys.argv) janela =", "600;}') botao1.clicked.connect(self.b1) botao2 = QPushButton('Botao 2', self) botao2.move(300, 100) botao2.resize(100,", "'QPushButton{background-color: white; color: black;} QPushButton:hover{ background: orange; font-weight: 600;}') botao1.clicked.connect(self.b1)", "PyQt5 import QtGui class Janela(QMainWindow): def __init__(self): super().__init__() self.topo =", "background: orange; font-weight: 600;}') botao1.clicked.connect(self.b1) botao2 = QPushButton('Botao 2', self)", "botao1.clicked.connect(self.b1) botao2 = QPushButton('Botao 2', self) botao2.move(300, 100) botao2.resize(100, 50)", "= 50 self.largura = 800 self.altura = 600 self.titulo =", "orange; font-weight: 600;}') botao1.clicked.connect(self.b1) botao2 = QPushButton('Botao 2', self) botao2.move(300,", "100) botao3.resize(100, 50) botao3.setStyleSheet('QPushButton{background-color: black; color: white;} QPushButton:hover{ background: orange;", "def b1(self): # forma 1 self.carro.setPixmap(QtGui.QPixmap('carro.jpg')) def b2(self, l): #", "botao2 = QPushButton('Botao 2', self) botao2.move(300, 100) botao2.resize(100, 50) botao2.setStyleSheet(", "50) self.l1.setStyleSheet('QLabel{font: bold; font-size: 20px;}') self.l1.resize(250, 50) self.l2 = QLabel(self)", "QPushButton:hover{ background: orange; font-weight: 600;}') botao1.clicked.connect(self.b1) botao2 = QPushButton('Botao 2',", "QMainWindow, QPushButton, QToolTip, QLabel, QLineEdit) from PyQt5 import QtGui class", "Janela(QMainWindow): def __init__(self): super().__init__() self.topo = 50 self.esquerda = 50", "QLabel(self) self.l1.setText('Clique em um botao') self.l1.move(50, 50) self.l1.setStyleSheet('QLabel{font: bold; font-size:", "caixa de texto/a4.py import sys from PyQt5.QtWidgets import (QApplication, QMainWindow,", "botao1 = QPushButton('Botao 1', self) botao1.move(100, 100) botao1.resize(100, 50) botao1.setStyleSheet(", "font-weight: 600}') botao3.clicked.connect(self.b3) def gera_labels(self): self.l1 = QLabel(self) self.l1.setText('Clique em", "self.l2.move(300, 30) self.l2.resize(260, 50) self.l2.setStyleSheet('QLabel{font: bold; font-size: 30px;}') def gera_imagens(self):", "b1(self): # forma 1 self.carro.setPixmap(QtGui.QPixmap('carro.jpg')) def b2(self, l): # forma", "de texto/a4.py import sys from PyQt5.QtWidgets import (QApplication, QMainWindow, QPushButton,", "<reponame>andersonssh/aprendendo-pyqt5<filename>curso 1/04 - caixa de texto/a4.py import sys from PyQt5.QtWidgets", "100) botao1.resize(100, 50) botao1.setStyleSheet( 'QPushButton{background-color: white; color: black;} QPushButton:hover{ background:", "self.caixa_texto.move(25, 10) self.caixa_texto.resize(150, 50) def b1(self): # forma 1 self.carro.setPixmap(QtGui.QPixmap('carro.jpg'))", "botao2.clicked.connect(self.b2) botao3 = QPushButton('Texto', self) botao3.move(500, 100) botao3.resize(100, 50) botao3.setStyleSheet('QPushButton{background-color:", "blue; color: white;} QPushButton:hover{ background: orange; font-weight: 600}') botao2.clicked.connect(self.b2) botao3", "QLineEdit(self) self.caixa_texto.move(25, 10) self.caixa_texto.resize(150, 50) def b1(self): # forma 1", "self.altura = 600 self.titulo = 'Primeira janela' self.gera_labels() self.gera_botoes() self.gera_imagens()", "white; color: black;} QPushButton:hover{ background: orange; font-weight: 600;}') botao1.clicked.connect(self.b1) botao2", "QPushButton:hover{ background: orange; font-weight: 600}') 
botao2.clicked.connect(self.b2) botao3 = QPushButton('Texto', self)", "background: orange; font-weight: 600}') botao3.clicked.connect(self.b3) def gera_labels(self): self.l1 = QLabel(self)", "self.caixa_texto.resize(150, 50) def b1(self): # forma 1 self.carro.setPixmap(QtGui.QPixmap('carro.jpg')) def b2(self,", "carregar_janela(self): self.setGeometry(self.esquerda, self.topo, self.largura, self.altura) self.setWindowTitle(self.titulo) self.show() def gera_botoes(self): #", "gera_botoes(self): # botoes botao1 = QPushButton('Botao 1', self) botao1.move(100, 100)", "= 50 self.esquerda = 50 self.largura = 800 self.altura =", "import sys from PyQt5.QtWidgets import (QApplication, QMainWindow, QPushButton, QToolTip, QLabel,", "def gera_caixas_de_texto(self): self.caixa_texto = QLineEdit(self) self.caixa_texto.move(25, 10) self.caixa_texto.resize(150, 50) def", "- caixa de texto/a4.py import sys from PyQt5.QtWidgets import (QApplication,", "= 600 self.titulo = 'Primeira janela' self.gera_labels() self.gera_botoes() self.gera_imagens() self.gera_caixas_de_texto()", "50) self.l2 = QLabel(self) self.l2.setText('Digitou: ') self.l2.move(300, 30) self.l2.resize(260, 50)", "class Janela(QMainWindow): def __init__(self): super().__init__() self.topo = 50 self.esquerda =", "self) botao3.move(500, 100) botao3.resize(100, 50) botao3.setStyleSheet('QPushButton{background-color: black; color: white;} QPushButton:hover{", "self.caixa_texto = QLineEdit(self) self.caixa_texto.move(25, 10) self.caixa_texto.resize(150, 50) def b1(self): #", "50) self.l2.setStyleSheet('QLabel{font: bold; font-size: 30px;}') def gera_imagens(self): self.carro = QLabel(self)", "50 self.esquerda = 50 self.largura = 800 self.altura = 600", "self.l1.resize(250, 50) self.l2 = QLabel(self) self.l2.setText('Digitou: ') self.l2.move(300, 30) self.l2.resize(260,", "= self.caixa_texto.text() self.l2.setText('Digitou: {}'.format(conteudo)) if __name__ == '__main__': app =", "30px;}') def gera_imagens(self): self.carro = QLabel(self) self.carro.move(25, 200) self.carro.resize(450, 337)", "= QLabel(self) self.carro.move(25, 200) self.carro.resize(450, 337) self.carro.setPixmap(QtGui.QPixmap('carro.jpg')) def gera_caixas_de_texto(self): self.caixa_texto", "l): # forma 2 self.carro.setPixmap(QtGui.QPixmap('carro2.jpg')) def b3(self): conteudo = self.caixa_texto.text()", "b3(self): conteudo = self.caixa_texto.text() self.l2.setText('Digitou: {}'.format(conteudo)) if __name__ == '__main__':", "self.carro.setPixmap(QtGui.QPixmap('carro2.jpg')) def b3(self): conteudo = self.caixa_texto.text() self.l2.setText('Digitou: {}'.format(conteudo)) if __name__", "b2(self, l): # forma 2 self.carro.setPixmap(QtGui.QPixmap('carro2.jpg')) def b3(self): conteudo =", "50) botao1.setStyleSheet( 'QPushButton{background-color: white; color: black;} QPushButton:hover{ background: orange; font-weight:", "white;} QPushButton:hover{ background: orange; font-weight: 600}') botao2.clicked.connect(self.b2) botao3 = QPushButton('Texto',", "(QApplication, QMainWindow, QPushButton, QToolTip, QLabel, QLineEdit) from PyQt5 import QtGui", "2 self.carro.setPixmap(QtGui.QPixmap('carro2.jpg')) def b3(self): conteudo = self.caixa_texto.text() self.l2.setText('Digitou: {}'.format(conteudo)) if", "1/04 - caixa de texto/a4.py import sys from PyQt5.QtWidgets import", "self.l2.setStyleSheet('QLabel{font: bold; font-size: 30px;}') def gera_imagens(self): self.carro = QLabel(self) self.carro.move(25,", "= QPushButton('Botao 2', self) botao2.move(300, 100) botao2.resize(100, 50) 
botao2.setStyleSheet( 'QPushButton{background-color:", "# forma 1 self.carro.setPixmap(QtGui.QPixmap('carro.jpg')) def b2(self, l): # forma 2", "QtGui class Janela(QMainWindow): def __init__(self): super().__init__() self.topo = 50 self.esquerda", "botao2.resize(100, 50) botao2.setStyleSheet( 'QPushButton{background-color: blue; color: white;} QPushButton:hover{ background: orange;", "janela' self.gera_labels() self.gera_botoes() self.gera_imagens() self.gera_caixas_de_texto() def carregar_janela(self): self.setGeometry(self.esquerda, self.topo, self.largura,", "botao2.setStyleSheet( 'QPushButton{background-color: blue; color: white;} QPushButton:hover{ background: orange; font-weight: 600}')", "= QLabel(self) self.l2.setText('Digitou: ') self.l2.move(300, 30) self.l2.resize(260, 50) self.l2.setStyleSheet('QLabel{font: bold;", "self.l1.setStyleSheet('QLabel{font: bold; font-size: 20px;}') self.l1.resize(250, 50) self.l2 = QLabel(self) self.l2.setText('Digitou:", "self.caixa_texto.text() self.l2.setText('Digitou: {}'.format(conteudo)) if __name__ == '__main__': app = QApplication(sys.argv)", "QPushButton, QToolTip, QLabel, QLineEdit) from PyQt5 import QtGui class Janela(QMainWindow):", "self.l2.setText('Digitou: ') self.l2.move(300, 30) self.l2.resize(260, 50) self.l2.setStyleSheet('QLabel{font: bold; font-size: 30px;}')", "self) botao2.move(300, 100) botao2.resize(100, 50) botao2.setStyleSheet( 'QPushButton{background-color: blue; color: white;}", "forma 1 self.carro.setPixmap(QtGui.QPixmap('carro.jpg')) def b2(self, l): # forma 2 self.carro.setPixmap(QtGui.QPixmap('carro2.jpg'))", "self.carro.setPixmap(QtGui.QPixmap('carro.jpg')) def b2(self, l): # forma 2 self.carro.setPixmap(QtGui.QPixmap('carro2.jpg')) def b3(self):", "forma 2 self.carro.setPixmap(QtGui.QPixmap('carro2.jpg')) def b3(self): conteudo = self.caixa_texto.text() self.l2.setText('Digitou: {}'.format(conteudo))" ]
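One detail worth calling out in the file above: QPushButton.clicked actually emits a boolean "checked" argument, and PyQt allows connecting a signal to a slot that declares fewer parameters than the signal carries. That is why b1 and b3 work without the extra argument while b2 receives it as the unused l.

# --- side note, not part of the original file ---
# QPushButton.clicked emits a bool ("checked"); PyQt drops extra signal
# arguments for slots that take fewer parameters, so b1/b3 ignore it while
# b2 receives it as `l`. An explicit alternative at the connection site:
#   botao2.clicked.connect(lambda checked: self.b2(checked))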
[ "height=\"{img_height}\" xlink:href=\"data:image/png;base64,{base64_data}\"/> </g> </svg>''' new_page_height += (img_height+10) svg += tmp_svg", "/> <int name=\"ruleColor\" value=\"0\" /> <float name=\"marginLeft\" value=\"0\" /> <float", "<float name=\"marginLeft\" value=\"0\" /> <float name=\"xOffset\" value=\"-380.701752\" /> <float name=\"xRuling\"", "write-std-ruling write-scale-down\" fill=\"none\" stroke=\"none\" stroke-width=\"1\" shape-rendering=\"crispEdges\" vector-effect=\"non-scaling-stroke\"> <rect class=\"pagerect\" fill=\"#FFFFFF\"", "PyPDF2 svg = '''<svg id=\"write-document\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\"> <rect id=\"write-doc-background\" width=\"100%\"", "height=\"100%\" fill=\"#808080\"/> <defs id=\"write-defs\"> <script type=\"text/writeconfig\"> <int name=\"docFormatVersion\" value=\"2\" />", "-rf /tmp/pdf2write') with open(f'{os.path.dirname(pdf_path)}/{os.path.basename(pdf_path).split(\".\")[0]}.svg', 'w') as f: f.write(svg) os.system(f'gzip -S", "720 n_pages = pdf.getNumPages() + 1 page = pdf.getPage(0) width", "y=\"{new_page_height+10}\" width=\"{img_width}px\" height=\"{img_height}px\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\"> <g class=\"write-content write-v3\" width=\"{img_width}\" height=\"{img_height}\"", "<g class=\"write-content write-v3\" width=\"{img_width}\" height=\"{img_height}\" xruling=\"0\" yruling=\"0\" marginLeft=\"0\" papercolor=\"#FFFFFF\" rulecolor=\"#00000000\">", "/> <int name=\"pageColor\" value=\"-1\" /> <int name=\"pageNum\" value=\"0\" /> <int", "height=\"{img_height}px\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\"> <g class=\"write-content write-v3\" width=\"{img_width}\" height=\"{img_height}\" xruling=\"0\" yruling=\"0\"", "<int name=\"pageColor\" value=\"-1\" /> <int name=\"pageNum\" value=\"0\" /> <int name=\"ruleColor\"", "tmp_svg svg += '''</svg>''' os.system('rm -rf /tmp/pdf2write') with open(f'{os.path.dirname(pdf_path)}/{os.path.basename(pdf_path).split(\".\")[0]}.svg', 'w')", "<defs id=\"write-defs\"> <script type=\"text/writeconfig\"> <int name=\"docFormatVersion\" value=\"2\" /> <int name=\"pageColor\"", "y=\"0\" width=\"{img_width}\" height=\"{img_height}\" /> </g> <image x=\"0\" y=\"0\" width=\"{img_width}\" height=\"{img_height}\"", "-p /tmp/pdf2write') new_page_height = 0 for page in range(n_pages): print(f\"Processing", "<float name=\"xOffset\" value=\"-380.701752\" /> <float name=\"xRuling\" value=\"0\" /> <float name=\"yOffset\"", "stroke=\"none\" stroke-width=\"1\" shape-rendering=\"crispEdges\" vector-effect=\"non-scaling-stroke\"> <rect class=\"pagerect\" fill=\"#FFFFFF\" stroke=\"none\" x=\"0\" y=\"0\"", "-png -f {page} -singlefile') with open(f'/tmp/pdf2write/tmp{page}.png', 'rb') as f: base64_data", "yruling=\"0\" marginLeft=\"0\" papercolor=\"#FFFFFF\" rulecolor=\"#00000000\"> <g class=\"ruleline write-std-ruling write-scale-down\" fill=\"none\" stroke=\"none\"", "value=\"1536.84216\" /> <float name=\"yRuling\" value=\"0\" /> </script> </defs> ''' pdf_path", "= sys.argv[1] pdf = PyPDF2.PdfFileReader(pdf_path, \"rb\") img_width = 720 n_pages", "height=\"{img_height}\" /> </g> <image x=\"0\" y=\"0\" width=\"{img_width}\" height=\"{img_height}\" xlink:href=\"data:image/png;base64,{base64_data}\"/> </g>", "x=\"0\" y=\"0\" width=\"{img_width}\" height=\"{img_height}\" xlink:href=\"data:image/png;base64,{base64_data}\"/> </g> </svg>''' new_page_height += (img_height+10)", "svg += 
'''</svg>''' os.system('rm -rf /tmp/pdf2write') with open(f'{os.path.dirname(pdf_path)}/{os.path.basename(pdf_path).split(\".\")[0]}.svg', 'w') as", "id=\"write-defs\"> <script type=\"text/writeconfig\"> <int name=\"docFormatVersion\" value=\"2\" /> <int name=\"pageColor\" value=\"-1\"", "class=\"write-content write-v3\" width=\"{img_width}\" height=\"{img_height}\" xruling=\"0\" yruling=\"0\" marginLeft=\"0\" papercolor=\"#FFFFFF\" rulecolor=\"#00000000\"> <g", "= int(aspect_ratio * img_width) os.system('mkdir -p /tmp/pdf2write') new_page_height = 0", "/> <float name=\"yRuling\" value=\"0\" /> </script> </defs> ''' pdf_path =", "<image x=\"0\" y=\"0\" width=\"{img_width}\" height=\"{img_height}\" xlink:href=\"data:image/png;base64,{base64_data}\"/> </g> </svg>''' new_page_height +=", "import base64 import os import sys import PyPDF2 svg =", "<float name=\"xRuling\" value=\"0\" /> <float name=\"yOffset\" value=\"1536.84216\" /> <float name=\"yRuling\"", "= PyPDF2.PdfFileReader(pdf_path, \"rb\") img_width = 720 n_pages = pdf.getNumPages() +", "shape-rendering=\"crispEdges\" vector-effect=\"non-scaling-stroke\"> <rect class=\"pagerect\" fill=\"#FFFFFF\" stroke=\"none\" x=\"0\" y=\"0\" width=\"{img_width}\" height=\"{img_height}\"", "id=\"write-doc-background\" width=\"100%\" height=\"100%\" fill=\"#808080\"/> <defs id=\"write-defs\"> <script type=\"text/writeconfig\"> <int name=\"docFormatVersion\"", "fill=\"none\" stroke=\"none\" stroke-width=\"1\" shape-rendering=\"crispEdges\" vector-effect=\"non-scaling-stroke\"> <rect class=\"pagerect\" fill=\"#FFFFFF\" stroke=\"none\" x=\"0\"", "write-scale-down\" fill=\"none\" stroke=\"none\" stroke-width=\"1\" shape-rendering=\"crispEdges\" vector-effect=\"non-scaling-stroke\"> <rect class=\"pagerect\" fill=\"#FFFFFF\" stroke=\"none\"", "/tmp/pdf2write') new_page_height = 0 for page in range(n_pages): print(f\"Processing {page}/{n_pages}\",", "svg += tmp_svg svg += '''</svg>''' os.system('rm -rf /tmp/pdf2write') with", "/> <float name=\"xRuling\" value=\"0\" /> <float name=\"yOffset\" value=\"1536.84216\" /> <float", "= '''<svg id=\"write-document\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\"> <rect id=\"write-doc-background\" width=\"100%\" height=\"100%\" fill=\"#808080\"/>", "in range(n_pages): print(f\"Processing {page}/{n_pages}\", end='\\r') os.system(f'pdftoppm {pdf_path} /tmp/pdf2write/tmp{page} -png -f", "int(aspect_ratio * img_width) os.system('mkdir -p /tmp/pdf2write') new_page_height = 0 for", "aspect_ratio = height/width img_height = int(aspect_ratio * img_width) os.system('mkdir -p", "<int name=\"docFormatVersion\" value=\"2\" /> <int name=\"pageColor\" value=\"-1\" /> <int name=\"pageNum\"", "<int name=\"ruleColor\" value=\"0\" /> <float name=\"marginLeft\" value=\"0\" /> <float name=\"xOffset\"", "base64_data = base64.b64encode(f.read()).decode('utf-8') tmp_svg = f'''<svg class=\"write-page\" color-interpolation=\"linearRGB\" x=\"10\" y=\"{new_page_height+10}\"", "value=\"-380.701752\" /> <float name=\"xRuling\" value=\"0\" /> <float name=\"yOffset\" value=\"1536.84216\" />", "img_height = int(aspect_ratio * img_width) os.system('mkdir -p /tmp/pdf2write') new_page_height =", "color-interpolation=\"linearRGB\" x=\"10\" y=\"{new_page_height+10}\" width=\"{img_width}px\" height=\"{img_height}px\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\"> <g class=\"write-content write-v3\"", "class=\"pagerect\" fill=\"#FFFFFF\" stroke=\"none\" x=\"0\" y=\"0\" 
width=\"{img_width}\" height=\"{img_height}\" /> </g> <image", "-f {page} -singlefile') with open(f'/tmp/pdf2write/tmp{page}.png', 'rb') as f: base64_data =", "import os import sys import PyPDF2 svg = '''<svg id=\"write-document\"", "img_width) os.system('mkdir -p /tmp/pdf2write') new_page_height = 0 for page in", "{page} -singlefile') with open(f'/tmp/pdf2write/tmp{page}.png', 'rb') as f: base64_data = base64.b64encode(f.read()).decode('utf-8')", "'rb') as f: base64_data = base64.b64encode(f.read()).decode('utf-8') tmp_svg = f'''<svg class=\"write-page\"", "pdf_path = sys.argv[1] pdf = PyPDF2.PdfFileReader(pdf_path, \"rb\") img_width = 720", "name=\"xOffset\" value=\"-380.701752\" /> <float name=\"xRuling\" value=\"0\" /> <float name=\"yOffset\" value=\"1536.84216\"", "svg = '''<svg id=\"write-document\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\"> <rect id=\"write-doc-background\" width=\"100%\" height=\"100%\"", "PyPDF2.PdfFileReader(pdf_path, \"rb\") img_width = 720 n_pages = pdf.getNumPages() + 1", "/tmp/pdf2write/tmp{page} -png -f {page} -singlefile') with open(f'/tmp/pdf2write/tmp{page}.png', 'rb') as f:", "{page}/{n_pages}\", end='\\r') os.system(f'pdftoppm {pdf_path} /tmp/pdf2write/tmp{page} -png -f {page} -singlefile') with", "+= (img_height+10) svg += tmp_svg svg += '''</svg>''' os.system('rm -rf", "'''</svg>''' os.system('rm -rf /tmp/pdf2write') with open(f'{os.path.dirname(pdf_path)}/{os.path.basename(pdf_path).split(\".\")[0]}.svg', 'w') as f: f.write(svg)", "name=\"pageNum\" value=\"0\" /> <int name=\"ruleColor\" value=\"0\" /> <float name=\"marginLeft\" value=\"0\"", "+= '''</svg>''' os.system('rm -rf /tmp/pdf2write') with open(f'{os.path.dirname(pdf_path)}/{os.path.basename(pdf_path).split(\".\")[0]}.svg', 'w') as f:", "class=\"write-page\" color-interpolation=\"linearRGB\" x=\"10\" y=\"{new_page_height+10}\" width=\"{img_width}px\" height=\"{img_height}px\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\"> <g class=\"write-content", "name=\"pageColor\" value=\"-1\" /> <int name=\"pageNum\" value=\"0\" /> <int name=\"ruleColor\" value=\"0\"", "base64.b64encode(f.read()).decode('utf-8') tmp_svg = f'''<svg class=\"write-page\" color-interpolation=\"linearRGB\" x=\"10\" y=\"{new_page_height+10}\" width=\"{img_width}px\" height=\"{img_height}px\"", "width=\"{img_width}\" height=\"{img_height}\" xruling=\"0\" yruling=\"0\" marginLeft=\"0\" papercolor=\"#FFFFFF\" rulecolor=\"#00000000\"> <g class=\"ruleline write-std-ruling", "xlink:href=\"data:image/png;base64,{base64_data}\"/> </g> </svg>''' new_page_height += (img_height+10) svg += tmp_svg svg", "* img_width) os.system('mkdir -p /tmp/pdf2write') new_page_height = 0 for page", "name=\"xRuling\" value=\"0\" /> <float name=\"yOffset\" value=\"1536.84216\" /> <float name=\"yRuling\" value=\"0\"", "= f'''<svg class=\"write-page\" color-interpolation=\"linearRGB\" x=\"10\" y=\"{new_page_height+10}\" width=\"{img_width}px\" height=\"{img_height}px\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\">", "= height/width img_height = int(aspect_ratio * img_width) os.system('mkdir -p /tmp/pdf2write')", "page.mediaBox.getWidth() height = page.mediaBox.getHeight() aspect_ratio = height/width img_height = int(aspect_ratio", "vector-effect=\"non-scaling-stroke\"> <rect class=\"pagerect\" fill=\"#FFFFFF\" stroke=\"none\" x=\"0\" y=\"0\" width=\"{img_width}\" height=\"{img_height}\" />", "import PyPDF2 svg = '''<svg id=\"write-document\" 
xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\"> <rect id=\"write-doc-background\"", "range(n_pages): print(f\"Processing {page}/{n_pages}\", end='\\r') os.system(f'pdftoppm {pdf_path} /tmp/pdf2write/tmp{page} -png -f {page}", "<script type=\"text/writeconfig\"> <int name=\"docFormatVersion\" value=\"2\" /> <int name=\"pageColor\" value=\"-1\" />", "fill=\"#808080\"/> <defs id=\"write-defs\"> <script type=\"text/writeconfig\"> <int name=\"docFormatVersion\" value=\"2\" /> <int", "xruling=\"0\" yruling=\"0\" marginLeft=\"0\" papercolor=\"#FFFFFF\" rulecolor=\"#00000000\"> <g class=\"ruleline write-std-ruling write-scale-down\" fill=\"none\"", "(img_height+10) svg += tmp_svg svg += '''</svg>''' os.system('rm -rf /tmp/pdf2write')", "img_width = 720 n_pages = pdf.getNumPages() + 1 page =", "name=\"yOffset\" value=\"1536.84216\" /> <float name=\"yRuling\" value=\"0\" /> </script> </defs> '''", "name=\"ruleColor\" value=\"0\" /> <float name=\"marginLeft\" value=\"0\" /> <float name=\"xOffset\" value=\"-380.701752\"", "= 720 n_pages = pdf.getNumPages() + 1 page = pdf.getPage(0)", "xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\"> <g class=\"write-content write-v3\" width=\"{img_width}\" height=\"{img_height}\" xruling=\"0\" yruling=\"0\" marginLeft=\"0\"", "open(f'/tmp/pdf2write/tmp{page}.png', 'rb') as f: base64_data = base64.b64encode(f.read()).decode('utf-8') tmp_svg = f'''<svg", "= page.mediaBox.getWidth() height = page.mediaBox.getHeight() aspect_ratio = height/width img_height =", "os.system('mkdir -p /tmp/pdf2write') new_page_height = 0 for page in range(n_pages):", "value=\"0\" /> <float name=\"marginLeft\" value=\"0\" /> <float name=\"xOffset\" value=\"-380.701752\" />", "name=\"yRuling\" value=\"0\" /> </script> </defs> ''' pdf_path = sys.argv[1] pdf", "<g class=\"ruleline write-std-ruling write-scale-down\" fill=\"none\" stroke=\"none\" stroke-width=\"1\" shape-rendering=\"crispEdges\" vector-effect=\"non-scaling-stroke\"> <rect", "papercolor=\"#FFFFFF\" rulecolor=\"#00000000\"> <g class=\"ruleline write-std-ruling write-scale-down\" fill=\"none\" stroke=\"none\" stroke-width=\"1\" shape-rendering=\"crispEdges\"", "/> <int name=\"pageNum\" value=\"0\" /> <int name=\"ruleColor\" value=\"0\" /> <float", "xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\"> <rect id=\"write-doc-background\" width=\"100%\" height=\"100%\" fill=\"#808080\"/> <defs id=\"write-defs\"> <script", "page in range(n_pages): print(f\"Processing {page}/{n_pages}\", end='\\r') os.system(f'pdftoppm {pdf_path} /tmp/pdf2write/tmp{page} -png", "/tmp/pdf2write') with open(f'{os.path.dirname(pdf_path)}/{os.path.basename(pdf_path).split(\".\")[0]}.svg', 'w') as f: f.write(svg) os.system(f'gzip -S z", "{pdf_path} /tmp/pdf2write/tmp{page} -png -f {page} -singlefile') with open(f'/tmp/pdf2write/tmp{page}.png', 'rb') as", "end='\\r') os.system(f'pdftoppm {pdf_path} /tmp/pdf2write/tmp{page} -png -f {page} -singlefile') with open(f'/tmp/pdf2write/tmp{page}.png',", "with open(f'/tmp/pdf2write/tmp{page}.png', 'rb') as f: base64_data = base64.b64encode(f.read()).decode('utf-8') tmp_svg =", "width=\"{img_width}\" height=\"{img_height}\" xlink:href=\"data:image/png;base64,{base64_data}\"/> </g> </svg>''' new_page_height += (img_height+10) svg +=", "<float name=\"yOffset\" value=\"1536.84216\" /> <float name=\"yRuling\" value=\"0\" /> </script> </defs>", "sys.argv[1] pdf = PyPDF2.PdfFileReader(pdf_path, \"rb\") img_width = 720 n_pages =", 
"'''<svg id=\"write-document\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\"> <rect id=\"write-doc-background\" width=\"100%\" height=\"100%\" fill=\"#808080\"/> <defs", "id=\"write-document\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\"> <rect id=\"write-doc-background\" width=\"100%\" height=\"100%\" fill=\"#808080\"/> <defs id=\"write-defs\">", "type=\"text/writeconfig\"> <int name=\"docFormatVersion\" value=\"2\" /> <int name=\"pageColor\" value=\"-1\" /> <int", "<rect id=\"write-doc-background\" width=\"100%\" height=\"100%\" fill=\"#808080\"/> <defs id=\"write-defs\"> <script type=\"text/writeconfig\"> <int", "= page.mediaBox.getHeight() aspect_ratio = height/width img_height = int(aspect_ratio * img_width)", "= base64.b64encode(f.read()).decode('utf-8') tmp_svg = f'''<svg class=\"write-page\" color-interpolation=\"linearRGB\" x=\"10\" y=\"{new_page_height+10}\" width=\"{img_width}px\"", "= 0 for page in range(n_pages): print(f\"Processing {page}/{n_pages}\", end='\\r') os.system(f'pdftoppm", "width=\"{img_width}\" height=\"{img_height}\" /> </g> <image x=\"0\" y=\"0\" width=\"{img_width}\" height=\"{img_height}\" xlink:href=\"data:image/png;base64,{base64_data}\"/>", "/> </g> <image x=\"0\" y=\"0\" width=\"{img_width}\" height=\"{img_height}\" xlink:href=\"data:image/png;base64,{base64_data}\"/> </g> </svg>'''", "os.system(f'pdftoppm {pdf_path} /tmp/pdf2write/tmp{page} -png -f {page} -singlefile') with open(f'/tmp/pdf2write/tmp{page}.png', 'rb')", "n_pages = pdf.getNumPages() + 1 page = pdf.getPage(0) width =", "new_page_height = 0 for page in range(n_pages): print(f\"Processing {page}/{n_pages}\", end='\\r')", "name=\"marginLeft\" value=\"0\" /> <float name=\"xOffset\" value=\"-380.701752\" /> <float name=\"xRuling\" value=\"0\"", "pdf.getNumPages() + 1 page = pdf.getPage(0) width = page.mediaBox.getWidth() height", "width=\"{img_width}px\" height=\"{img_height}px\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\"> <g class=\"write-content write-v3\" width=\"{img_width}\" height=\"{img_height}\" xruling=\"0\"", "xmlns:xlink=\"http://www.w3.org/1999/xlink\"> <rect id=\"write-doc-background\" width=\"100%\" height=\"100%\" fill=\"#808080\"/> <defs id=\"write-defs\"> <script type=\"text/writeconfig\">", "\"rb\") img_width = 720 n_pages = pdf.getNumPages() + 1 page", "</script> </defs> ''' pdf_path = sys.argv[1] pdf = PyPDF2.PdfFileReader(pdf_path, \"rb\")", "value=\"0\" /> <int name=\"ruleColor\" value=\"0\" /> <float name=\"marginLeft\" value=\"0\" />", "x=\"10\" y=\"{new_page_height+10}\" width=\"{img_width}px\" height=\"{img_height}px\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\"> <g class=\"write-content write-v3\" width=\"{img_width}\"", "marginLeft=\"0\" papercolor=\"#FFFFFF\" rulecolor=\"#00000000\"> <g class=\"ruleline write-std-ruling write-scale-down\" fill=\"none\" stroke=\"none\" stroke-width=\"1\"", "page.mediaBox.getHeight() aspect_ratio = height/width img_height = int(aspect_ratio * img_width) os.system('mkdir", "name=\"docFormatVersion\" value=\"2\" /> <int name=\"pageColor\" value=\"-1\" /> <int name=\"pageNum\" value=\"0\"", "y=\"0\" width=\"{img_width}\" height=\"{img_height}\" xlink:href=\"data:image/png;base64,{base64_data}\"/> </g> </svg>''' new_page_height += (img_height+10) svg", "new_page_height += (img_height+10) svg += tmp_svg svg += '''</svg>''' os.system('rm", "value=\"-1\" /> <int name=\"pageNum\" value=\"0\" /> 
<int name=\"ruleColor\" value=\"0\" />", "</defs> ''' pdf_path = sys.argv[1] pdf = PyPDF2.PdfFileReader(pdf_path, \"rb\") img_width", "<float name=\"yRuling\" value=\"0\" /> </script> </defs> ''' pdf_path = sys.argv[1]", "height=\"{img_height}\" xruling=\"0\" yruling=\"0\" marginLeft=\"0\" papercolor=\"#FFFFFF\" rulecolor=\"#00000000\"> <g class=\"ruleline write-std-ruling write-scale-down\"", "pdf.getPage(0) width = page.mediaBox.getWidth() height = page.mediaBox.getHeight() aspect_ratio = height/width", "print(f\"Processing {page}/{n_pages}\", end='\\r') os.system(f'pdftoppm {pdf_path} /tmp/pdf2write/tmp{page} -png -f {page} -singlefile')", "as f: base64_data = base64.b64encode(f.read()).decode('utf-8') tmp_svg = f'''<svg class=\"write-page\" color-interpolation=\"linearRGB\"", "for page in range(n_pages): print(f\"Processing {page}/{n_pages}\", end='\\r') os.system(f'pdftoppm {pdf_path} /tmp/pdf2write/tmp{page}", "width = page.mediaBox.getWidth() height = page.mediaBox.getHeight() aspect_ratio = height/width img_height", "write-v3\" width=\"{img_width}\" height=\"{img_height}\" xruling=\"0\" yruling=\"0\" marginLeft=\"0\" papercolor=\"#FFFFFF\" rulecolor=\"#00000000\"> <g class=\"ruleline", "</g> <image x=\"0\" y=\"0\" width=\"{img_width}\" height=\"{img_height}\" xlink:href=\"data:image/png;base64,{base64_data}\"/> </g> </svg>''' new_page_height", "0 for page in range(n_pages): print(f\"Processing {page}/{n_pages}\", end='\\r') os.system(f'pdftoppm {pdf_path}", "/> <float name=\"marginLeft\" value=\"0\" /> <float name=\"xOffset\" value=\"-380.701752\" /> <float", "value=\"0\" /> </script> </defs> ''' pdf_path = sys.argv[1] pdf =", "rulecolor=\"#00000000\"> <g class=\"ruleline write-std-ruling write-scale-down\" fill=\"none\" stroke=\"none\" stroke-width=\"1\" shape-rendering=\"crispEdges\" vector-effect=\"non-scaling-stroke\">", "os.system('rm -rf /tmp/pdf2write') with open(f'{os.path.dirname(pdf_path)}/{os.path.basename(pdf_path).split(\".\")[0]}.svg', 'w') as f: f.write(svg) os.system(f'gzip", "with open(f'{os.path.dirname(pdf_path)}/{os.path.basename(pdf_path).split(\".\")[0]}.svg', 'w') as f: f.write(svg) os.system(f'gzip -S z {os.path.dirname(pdf_path)}/{os.path.basename(pdf_path).split(\".\")[0]}.svg')", "pdf = PyPDF2.PdfFileReader(pdf_path, \"rb\") img_width = 720 n_pages = pdf.getNumPages()", "value=\"2\" /> <int name=\"pageColor\" value=\"-1\" /> <int name=\"pageNum\" value=\"0\" />", "value=\"0\" /> <float name=\"xOffset\" value=\"-380.701752\" /> <float name=\"xRuling\" value=\"0\" />", "page = pdf.getPage(0) width = page.mediaBox.getWidth() height = page.mediaBox.getHeight() aspect_ratio", "</svg>''' new_page_height += (img_height+10) svg += tmp_svg svg += '''</svg>'''", "''' pdf_path = sys.argv[1] pdf = PyPDF2.PdfFileReader(pdf_path, \"rb\") img_width =", "sys import PyPDF2 svg = '''<svg id=\"write-document\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\"> <rect", "+ 1 page = pdf.getPage(0) width = page.mediaBox.getWidth() height =", "1 page = pdf.getPage(0) width = page.mediaBox.getWidth() height = page.mediaBox.getHeight()", "height/width img_height = int(aspect_ratio * img_width) os.system('mkdir -p /tmp/pdf2write') new_page_height", "fill=\"#FFFFFF\" stroke=\"none\" x=\"0\" y=\"0\" width=\"{img_width}\" height=\"{img_height}\" /> </g> <image x=\"0\"", "= pdf.getPage(0) width = page.mediaBox.getWidth() height = page.mediaBox.getHeight() aspect_ratio =", "/> <float name=\"yOffset\" value=\"1536.84216\" /> <float name=\"yRuling\" 
value=\"0\" /> </script>", "height = page.mediaBox.getHeight() aspect_ratio = height/width img_height = int(aspect_ratio *", "f: base64_data = base64.b64encode(f.read()).decode('utf-8') tmp_svg = f'''<svg class=\"write-page\" color-interpolation=\"linearRGB\" x=\"10\"", "<rect class=\"pagerect\" fill=\"#FFFFFF\" stroke=\"none\" x=\"0\" y=\"0\" width=\"{img_width}\" height=\"{img_height}\" /> </g>", "</g> </svg>''' new_page_height += (img_height+10) svg += tmp_svg svg +=", "import sys import PyPDF2 svg = '''<svg id=\"write-document\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\">", "+= tmp_svg svg += '''</svg>''' os.system('rm -rf /tmp/pdf2write') with open(f'{os.path.dirname(pdf_path)}/{os.path.basename(pdf_path).split(\".\")[0]}.svg',", "<int name=\"pageNum\" value=\"0\" /> <int name=\"ruleColor\" value=\"0\" /> <float name=\"marginLeft\"", "/> </script> </defs> ''' pdf_path = sys.argv[1] pdf = PyPDF2.PdfFileReader(pdf_path,", "xmlns:xlink=\"http://www.w3.org/1999/xlink\"> <g class=\"write-content write-v3\" width=\"{img_width}\" height=\"{img_height}\" xruling=\"0\" yruling=\"0\" marginLeft=\"0\" papercolor=\"#FFFFFF\"", "value=\"0\" /> <float name=\"yOffset\" value=\"1536.84216\" /> <float name=\"yRuling\" value=\"0\" />", "x=\"0\" y=\"0\" width=\"{img_width}\" height=\"{img_height}\" /> </g> <image x=\"0\" y=\"0\" width=\"{img_width}\"", "f'''<svg class=\"write-page\" color-interpolation=\"linearRGB\" x=\"10\" y=\"{new_page_height+10}\" width=\"{img_width}px\" height=\"{img_height}px\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\"> <g", "stroke-width=\"1\" shape-rendering=\"crispEdges\" vector-effect=\"non-scaling-stroke\"> <rect class=\"pagerect\" fill=\"#FFFFFF\" stroke=\"none\" x=\"0\" y=\"0\" width=\"{img_width}\"", "-singlefile') with open(f'/tmp/pdf2write/tmp{page}.png', 'rb') as f: base64_data = base64.b64encode(f.read()).decode('utf-8') tmp_svg", "base64 import os import sys import PyPDF2 svg = '''<svg", "= pdf.getNumPages() + 1 page = pdf.getPage(0) width = page.mediaBox.getWidth()", "class=\"ruleline write-std-ruling write-scale-down\" fill=\"none\" stroke=\"none\" stroke-width=\"1\" shape-rendering=\"crispEdges\" vector-effect=\"non-scaling-stroke\"> <rect class=\"pagerect\"", "stroke=\"none\" x=\"0\" y=\"0\" width=\"{img_width}\" height=\"{img_height}\" /> </g> <image x=\"0\" y=\"0\"", "width=\"100%\" height=\"100%\" fill=\"#808080\"/> <defs id=\"write-defs\"> <script type=\"text/writeconfig\"> <int name=\"docFormatVersion\" value=\"2\"", "/> <float name=\"xOffset\" value=\"-380.701752\" /> <float name=\"xRuling\" value=\"0\" /> <float", "os import sys import PyPDF2 svg = '''<svg id=\"write-document\" xmlns=\"http://www.w3.org/2000/svg\"", "tmp_svg = f'''<svg class=\"write-page\" color-interpolation=\"linearRGB\" x=\"10\" y=\"{new_page_height+10}\" width=\"{img_width}px\" height=\"{img_height}px\" xmlns=\"http://www.w3.org/2000/svg\"" ]
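This converter shells out to pdftoppm (from poppler-utils, which must be on PATH) and uses the old PyPDF2 1.x names (PdfFileReader, getNumPages, mediaBox); newer pypdf releases renamed these (PdfReader, len(reader.pages), page.mediabox), so an old PyPDF2 needs to be pinned for the script to run as written. Saved as, say, pdf2write.py (filename assumed), running `python pdf2write.py notes.pdf` leaves a gzipped notes.svgz next to the input, which the Stylus Labs Write app can open. As a small hardening sketch, the raster call could go through subprocess instead of os.system, which survives paths containing spaces; the helper name below is hypothetical:

# --- hardening sketch, not part of the original script ---
import subprocess

def rasterize_page(pdf_path: str, page: int) -> None:
    # Same pdftoppm invocation as above, but as an argument list rather
    # than an interpolated shell string.
    subprocess.run(
        ['pdftoppm', pdf_path, f'/tmp/pdf2write/tmp{page}',
         '-png', '-f', str(page), '-singlefile'],
        check=True)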
[ "set_parameter_value(self, name: str, value: ParameterValueType): param = self.get_parameter(name) param.value =", "' already added to this object') parameter = Parameter(name, value,", "get_parameter(self, name: str) -> Parameter: for parameter in self.parameters: if", "value_range: ParameterRangeType): if name in self._parameters: raise Exception('parameter named '", "= parameter def get_parameter(self, name: str) -> Parameter: for parameter", "was expected to be enum (error: 80a1d180)\") value = self.get_parameter_value(name)", "None: self._parameters[parameter.name] = parameter def get_parameter(self, name: str) -> Parameter:", "Exception('parameter named ' + name + ' already added to", "param_type, value_range) self._parameters[name] = parameter def add_parameter_object(self, parameter: Parameter) ->", "be float (error: f009d0ef)\") value = self.get_parameter_value(name) cast_value = cast(float,", "cast(float, value) return cast_value def get_enum_parameter_value(self, name: str) -> str:", "def set_parameter_value(self, name: str, value: ParameterValueType): param = self.get_parameter(name) param.value", "= parameter def add_parameter_object(self, parameter: Parameter) -> None: self._parameters[parameter.name] =", "param_type: str, value_range: ParameterRangeType): if name in self._parameters: raise Exception('parameter", "= {} super().__init__() def has_parameter(self, name: str) -> bool: return", "List[str] = cast(List[str], list_of_names) raise Exception('parameter named ' + name", "self._parameters: raise Exception('parameter named ' + name + ' already", "+ ', '.join(available_names)) def get_parameter_value(self, name: str) -> ParameterValueType: param", "f009d0ef)\") value = self.get_parameter_value(name) cast_value = cast(float, value) return cast_value", "param.type != Parameter.TYPE_ENUM: raise ValueError(f\"parameter {name} was expected to be", "name: str) -> bool: return name in self._parameters def add_parameter(self,", "Parameter, ParameterValueType, ParameterRangeType class HavingParameters: def __init__(self): self._parameters: Dict[str, Parameter]", "ParameterValueType, ParameterRangeType class HavingParameters: def __init__(self): self._parameters: Dict[str, Parameter] =", "= self.get_parameter(name) return param.value def get_float_parameter_value(self, name: str) -> float:", "list_of_names: List[str] = [p.name for p in self.parameters] # noinspection", "+ ' not found. Available: ' + ', '.join(available_names)) def", "has_parameter(self, name: str) -> bool: return name in self._parameters def", "name + ' not found. 
Available: ' + ', '.join(available_names))", "= self.get_parameter_value(name) cast_value = cast(float, value) return cast_value def get_enum_parameter_value(self,", "raise Exception('parameter named ' + name + ' already added", "str: param = self.get_parameter(name) if param.type != Parameter.TYPE_ENUM: raise ValueError(f\"parameter", "self._parameters[parameter.name] = parameter def get_parameter(self, name: str) -> Parameter: for", "self.get_parameter(name) if param.type != Parameter.TYPE_ENUM: raise ValueError(f\"parameter {name} was expected", "def has_parameter(self, name: str) -> bool: return name in self._parameters", "List[str] = [p.name for p in self.parameters] # noinspection PyTypeChecker", "param = self.get_parameter(name) if param.type != Parameter.TYPE_ENUM: raise ValueError(f\"parameter {name}", "value, param_type, value_range) self._parameters[name] = parameter def add_parameter_object(self, parameter: Parameter)", "name: str) -> ParameterValueType: param = self.get_parameter(name) return param.value def", "ValueError(f\"parameter {name} was expected to be enum (error: 80a1d180)\") value", "already added to this object') parameter = Parameter(name, value, param_type,", "self.parameters] # noinspection PyTypeChecker available_names: List[str] = cast(List[str], list_of_names) raise", "value_range) self._parameters[name] = parameter def add_parameter_object(self, parameter: Parameter) -> None:", "add_parameter(self, name: str, value: ParameterValueType, param_type: str, value_range: ParameterRangeType): if", "' + name + ' already added to this object')", "enum (error: 80a1d180)\") value = self.get_parameter_value(name) cast_value = cast(str, value)", "self.parameters: if parameter.name == name: return parameter list_of_names: List[str] =", "in self._parameters: raise Exception('parameter named ' + name + '", "' not found. Available: ' + ', '.join(available_names)) def get_parameter_value(self,", "if parameter.name == name: return parameter list_of_names: List[str] = [p.name", "cast(str, value) return cast_value def set_parameter_value(self, name: str, value: ParameterValueType):", "param = self.get_parameter(name) if param.type != Parameter.TYPE_FLOAT: raise ValueError(f\"parameter {name}", "name in self._parameters def add_parameter(self, name: str, value: ParameterValueType, param_type:", "Parameter(name, value, param_type, value_range) self._parameters[name] = parameter def add_parameter_object(self, parameter:", "Parameter.TYPE_FLOAT: raise ValueError(f\"parameter {name} was expected to be float (error:", "self._parameters def add_parameter(self, name: str, value: ParameterValueType, param_type: str, value_range:", "ParameterValueType, param_type: str, value_range: ParameterRangeType): if name in self._parameters: raise", "ParameterValueType): param = self.get_parameter(name) param.value = value @property def parameters(self)", "import Parameter, ParameterValueType, ParameterRangeType class HavingParameters: def __init__(self): self._parameters: Dict[str,", "ParameterRangeType class HavingParameters: def __init__(self): self._parameters: Dict[str, Parameter] = {}", "this object') parameter = Parameter(name, value, param_type, value_range) self._parameters[name] =", "+ name + ' not found. 
Available: ' + ',", "str, value: ParameterValueType, param_type: str, value_range: ParameterRangeType): if name in", "parameter list_of_names: List[str] = [p.name for p in self.parameters] #", "= self.get_parameter(name) if param.type != Parameter.TYPE_ENUM: raise ValueError(f\"parameter {name} was", "[p.name for p in self.parameters] # noinspection PyTypeChecker available_names: List[str]", "found. Available: ' + ', '.join(available_names)) def get_parameter_value(self, name: str)", "was expected to be float (error: f009d0ef)\") value = self.get_parameter_value(name)", "ParameterValueType: param = self.get_parameter(name) return param.value def get_float_parameter_value(self, name: str)", "Parameter.TYPE_ENUM: raise ValueError(f\"parameter {name} was expected to be enum (error:", "from typing import Dict, List, cast from py_headless_daw.project.parameter import Parameter,", "self._parameters: Dict[str, Parameter] = {} super().__init__() def has_parameter(self, name: str)", "value) return cast_value def set_parameter_value(self, name: str, value: ParameterValueType): param", "cast from py_headless_daw.project.parameter import Parameter, ParameterValueType, ParameterRangeType class HavingParameters: def", "add_parameter_object(self, parameter: Parameter) -> None: self._parameters[parameter.name] = parameter def get_parameter(self,", "{name} was expected to be enum (error: 80a1d180)\") value =", "return cast_value def set_parameter_value(self, name: str, value: ParameterValueType): param =", "if param.type != Parameter.TYPE_ENUM: raise ValueError(f\"parameter {name} was expected to", "Parameter: for parameter in self.parameters: if parameter.name == name: return", "name: str) -> float: param = self.get_parameter(name) if param.type !=", "cast(List[str], list_of_names) raise Exception('parameter named ' + name + '", "return cast_value def get_enum_parameter_value(self, name: str) -> str: param =", "Parameter] = {} super().__init__() def has_parameter(self, name: str) -> bool:", "raise ValueError(f\"parameter {name} was expected to be enum (error: 80a1d180)\")", "parameter def add_parameter_object(self, parameter: Parameter) -> None: self._parameters[parameter.name] = parameter", "float: param = self.get_parameter(name) if param.type != Parameter.TYPE_FLOAT: raise ValueError(f\"parameter", "{name} was expected to be float (error: f009d0ef)\") value =", "def get_parameter_value(self, name: str) -> ParameterValueType: param = self.get_parameter(name) return", "named ' + name + ' already added to this", "bool: return name in self._parameters def add_parameter(self, name: str, value:", "', '.join(available_names)) def get_parameter_value(self, name: str) -> ParameterValueType: param =", "str) -> float: param = self.get_parameter(name) if param.type != Parameter.TYPE_FLOAT:", "noinspection PyTypeChecker available_names: List[str] = cast(List[str], list_of_names) raise Exception('parameter named", "def get_parameter(self, name: str) -> Parameter: for parameter in self.parameters:", "get_float_parameter_value(self, name: str) -> float: param = self.get_parameter(name) if param.type", "{} super().__init__() def has_parameter(self, name: str) -> bool: return name", "Exception('parameter named ' + name + ' not found. 
Available:", "-> bool: return name in self._parameters def add_parameter(self, name: str,", "def add_parameter_object(self, parameter: Parameter) -> None: self._parameters[parameter.name] = parameter def", "value: ParameterValueType): param = self.get_parameter(name) param.value = value @property def", "name + ' already added to this object') parameter =", "__init__(self): self._parameters: Dict[str, Parameter] = {} super().__init__() def has_parameter(self, name:", "-> ParameterValueType: param = self.get_parameter(name) return param.value def get_float_parameter_value(self, name:", "be enum (error: 80a1d180)\") value = self.get_parameter_value(name) cast_value = cast(str,", "in self._parameters def add_parameter(self, name: str, value: ParameterValueType, param_type: str,", "= self.get_parameter_value(name) cast_value = cast(str, value) return cast_value def set_parameter_value(self,", "raise ValueError(f\"parameter {name} was expected to be float (error: f009d0ef)\")", "HavingParameters: def __init__(self): self._parameters: Dict[str, Parameter] = {} super().__init__() def", "= self.get_parameter(name) param.value = value @property def parameters(self) -> List[Parameter]:", "Dict, List, cast from py_headless_daw.project.parameter import Parameter, ParameterValueType, ParameterRangeType class", "param.type != Parameter.TYPE_FLOAT: raise ValueError(f\"parameter {name} was expected to be", "return name in self._parameters def add_parameter(self, name: str, value: ParameterValueType,", "added to this object') parameter = Parameter(name, value, param_type, value_range)", "list_of_names) raise Exception('parameter named ' + name + ' not", "ValueError(f\"parameter {name} was expected to be float (error: f009d0ef)\") value", "expected to be float (error: f009d0ef)\") value = self.get_parameter_value(name) cast_value", "name: str) -> Parameter: for parameter in self.parameters: if parameter.name", "param = self.get_parameter(name) param.value = value @property def parameters(self) ->", "+ name + ' already added to this object') parameter", "in self.parameters] # noinspection PyTypeChecker available_names: List[str] = cast(List[str], list_of_names)", "parameter = Parameter(name, value, param_type, value_range) self._parameters[name] = parameter def", "import Dict, List, cast from py_headless_daw.project.parameter import Parameter, ParameterValueType, ParameterRangeType", "' + name + ' not found. 
Available: ' +", "def get_enum_parameter_value(self, name: str) -> str: param = self.get_parameter(name) if", "return parameter list_of_names: List[str] = [p.name for p in self.parameters]", "raise Exception('parameter named ' + name + ' not found.", "(error: f009d0ef)\") value = self.get_parameter_value(name) cast_value = cast(float, value) return", "= cast(str, value) return cast_value def set_parameter_value(self, name: str, value:", "str) -> bool: return name in self._parameters def add_parameter(self, name:", "-> Parameter: for parameter in self.parameters: if parameter.name == name:", "Parameter) -> None: self._parameters[parameter.name] = parameter def get_parameter(self, name: str)", "'.join(available_names)) def get_parameter_value(self, name: str) -> ParameterValueType: param = self.get_parameter(name)", "-> float: param = self.get_parameter(name) if param.type != Parameter.TYPE_FLOAT: raise", "parameter: Parameter) -> None: self._parameters[parameter.name] = parameter def get_parameter(self, name:", "== name: return parameter list_of_names: List[str] = [p.name for p", "get_enum_parameter_value(self, name: str) -> str: param = self.get_parameter(name) if param.type", "80a1d180)\") value = self.get_parameter_value(name) cast_value = cast(str, value) return cast_value", "self.get_parameter_value(name) cast_value = cast(str, value) return cast_value def set_parameter_value(self, name:", "parameter def get_parameter(self, name: str) -> Parameter: for parameter in", "self.get_parameter(name) param.value = value @property def parameters(self) -> List[Parameter]: return", "parameter in self.parameters: if parameter.name == name: return parameter list_of_names:", "str) -> str: param = self.get_parameter(name) if param.type != Parameter.TYPE_ENUM:", "= Parameter(name, value, param_type, value_range) self._parameters[name] = parameter def add_parameter_object(self,", "self.get_parameter(name) return param.value def get_float_parameter_value(self, name: str) -> float: param", "cast_value = cast(str, value) return cast_value def set_parameter_value(self, name: str,", "float (error: f009d0ef)\") value = self.get_parameter_value(name) cast_value = cast(float, value)", "param = self.get_parameter(name) return param.value def get_float_parameter_value(self, name: str) ->", "!= Parameter.TYPE_ENUM: raise ValueError(f\"parameter {name} was expected to be enum", "-> None: self._parameters[parameter.name] = parameter def get_parameter(self, name: str) ->", "PyTypeChecker available_names: List[str] = cast(List[str], list_of_names) raise Exception('parameter named '", "return param.value def get_float_parameter_value(self, name: str) -> float: param =", "str) -> ParameterValueType: param = self.get_parameter(name) return param.value def get_float_parameter_value(self,", "p in self.parameters] # noinspection PyTypeChecker available_names: List[str] = cast(List[str],", "+ ' already added to this object') parameter = Parameter(name,", "Dict[str, Parameter] = {} super().__init__() def has_parameter(self, name: str) ->", "from py_headless_daw.project.parameter import Parameter, ParameterValueType, ParameterRangeType class HavingParameters: def __init__(self):", "name: str, value: ParameterValueType): param = self.get_parameter(name) param.value = value", "in self.parameters: if parameter.name == name: return parameter list_of_names: List[str]", "-> str: param = self.get_parameter(name) if param.type != Parameter.TYPE_ENUM: raise", "name: str, value: ParameterValueType, param_type: str, value_range: 
ParameterRangeType): if name", "# noinspection PyTypeChecker available_names: List[str] = cast(List[str], list_of_names) raise Exception('parameter", "= cast(float, value) return cast_value def get_enum_parameter_value(self, name: str) ->", "if param.type != Parameter.TYPE_FLOAT: raise ValueError(f\"parameter {name} was expected to", "List, cast from py_headless_daw.project.parameter import Parameter, ParameterValueType, ParameterRangeType class HavingParameters:", "for parameter in self.parameters: if parameter.name == name: return parameter", "expected to be enum (error: 80a1d180)\") value = self.get_parameter_value(name) cast_value", "typing import Dict, List, cast from py_headless_daw.project.parameter import Parameter, ParameterValueType,", "def __init__(self): self._parameters: Dict[str, Parameter] = {} super().__init__() def has_parameter(self,", "class HavingParameters: def __init__(self): self._parameters: Dict[str, Parameter] = {} super().__init__()", "str, value_range: ParameterRangeType): if name in self._parameters: raise Exception('parameter named", "param.value = value @property def parameters(self) -> List[Parameter]: return list(self._parameters.values())", "to be float (error: f009d0ef)\") value = self.get_parameter_value(name) cast_value =", "self.get_parameter_value(name) cast_value = cast(float, value) return cast_value def get_enum_parameter_value(self, name:", "cast_value = cast(float, value) return cast_value def get_enum_parameter_value(self, name: str)", "ParameterRangeType): if name in self._parameters: raise Exception('parameter named ' +", "Available: ' + ', '.join(available_names)) def get_parameter_value(self, name: str) ->", "' + ', '.join(available_names)) def get_parameter_value(self, name: str) -> ParameterValueType:", "def add_parameter(self, name: str, value: ParameterValueType, param_type: str, value_range: ParameterRangeType):", "get_parameter_value(self, name: str) -> ParameterValueType: param = self.get_parameter(name) return param.value", "not found. Available: ' + ', '.join(available_names)) def get_parameter_value(self, name:", "def get_float_parameter_value(self, name: str) -> float: param = self.get_parameter(name) if", "str) -> Parameter: for parameter in self.parameters: if parameter.name ==", "= self.get_parameter(name) if param.type != Parameter.TYPE_FLOAT: raise ValueError(f\"parameter {name} was", "= cast(List[str], list_of_names) raise Exception('parameter named ' + name +", "param.value def get_float_parameter_value(self, name: str) -> float: param = self.get_parameter(name)", "to this object') parameter = Parameter(name, value, param_type, value_range) self._parameters[name]", "super().__init__() def has_parameter(self, name: str) -> bool: return name in", "value = self.get_parameter_value(name) cast_value = cast(float, value) return cast_value def", "name: str) -> str: param = self.get_parameter(name) if param.type !=", "(error: 80a1d180)\") value = self.get_parameter_value(name) cast_value = cast(str, value) return", "available_names: List[str] = cast(List[str], list_of_names) raise Exception('parameter named ' +", "str, value: ParameterValueType): param = self.get_parameter(name) param.value = value @property", "if name in self._parameters: raise Exception('parameter named ' + name", "= [p.name for p in self.parameters] # noinspection PyTypeChecker available_names:", "named ' + name + ' not found. 
Available: '", "to be enum (error: 80a1d180)\") value = self.get_parameter_value(name) cast_value =", "object') parameter = Parameter(name, value, param_type, value_range) self._parameters[name] = parameter", "!= Parameter.TYPE_FLOAT: raise ValueError(f\"parameter {name} was expected to be float", "name in self._parameters: raise Exception('parameter named ' + name +", "self._parameters[name] = parameter def add_parameter_object(self, parameter: Parameter) -> None: self._parameters[parameter.name]", "name: return parameter list_of_names: List[str] = [p.name for p in", "value) return cast_value def get_enum_parameter_value(self, name: str) -> str: param", "value: ParameterValueType, param_type: str, value_range: ParameterRangeType): if name in self._parameters:", "py_headless_daw.project.parameter import Parameter, ParameterValueType, ParameterRangeType class HavingParameters: def __init__(self): self._parameters:", "parameter.name == name: return parameter list_of_names: List[str] = [p.name for", "cast_value def get_enum_parameter_value(self, name: str) -> str: param = self.get_parameter(name)", "value = self.get_parameter_value(name) cast_value = cast(str, value) return cast_value def", "for p in self.parameters] # noinspection PyTypeChecker available_names: List[str] =", "cast_value def set_parameter_value(self, name: str, value: ParameterValueType): param = self.get_parameter(name)", "self.get_parameter(name) if param.type != Parameter.TYPE_FLOAT: raise ValueError(f\"parameter {name} was expected" ]
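A minimal usage sketch follows (illustrative only, not part of the original module); it assumes a concrete Parameter class whose constructor takes (name, value, param_type, value_range) and which exposes TYPE_FLOAT plus .type and .value attributes, as the methods above already rely on.

class _DemoSynth(HavingParameters):  # hypothetical subclass, for illustration only
    pass

demo = _DemoSynth()
demo.add_parameter('cutoff', 0.5, Parameter.TYPE_FLOAT, (0.0, 1.0))
demo.set_parameter_value('cutoff', 0.75)
assert demo.has_parameter('cutoff')
assert demo.get_float_parameter_value('cutoff') == 0.75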
[ "a Region Of Interest, which may be either horizontal #", "+ 1 def valid(self): return self.start >= 0 and self.start", "- start + 1 def valid(self): return self.start >= 0", "def crop(self, spectrum): return spectrum[self.start:self.end+1] def contains(self, value): return self.start", "encapsulates a Region Of Interest, which may be either horizontal", "which may be either horizontal # (pixels) or vertical (rows/lines).", "Region Of Interest, which may be either horizontal # (pixels)", "class ROI: def __init__(self, start, end): self.start = start self.end", "def valid(self): return self.start >= 0 and self.start < self.end", "or vertical (rows/lines). class ROI: def __init__(self, start, end): self.start", "= start self.end = end self.len = end - start", "self.start < self.end def crop(self, spectrum): return spectrum[self.start:self.end+1] def contains(self,", "1 def valid(self): return self.start >= 0 and self.start <", "(pixels) or vertical (rows/lines). class ROI: def __init__(self, start, end):", "= end - start + 1 def valid(self): return self.start", "# This class encapsulates a Region Of Interest, which may", "start self.end = end self.len = end - start +", "and self.start < self.end def crop(self, spectrum): return spectrum[self.start:self.end+1] def", "self.len = end - start + 1 def valid(self): return", "self.end = end self.len = end - start + 1", "Of Interest, which may be either horizontal # (pixels) or", "spectrum): return spectrum[self.start:self.end+1] def contains(self, value): return self.start <= value", "< self.end def crop(self, spectrum): return spectrum[self.start:self.end+1] def contains(self, value):", "## # This class encapsulates a Region Of Interest, which", "ROI: def __init__(self, start, end): self.start = start self.end =", "valid(self): return self.start >= 0 and self.start < self.end def", "# (pixels) or vertical (rows/lines). class ROI: def __init__(self, start,", "self.start = start self.end = end self.len = end -", "either horizontal # (pixels) or vertical (rows/lines). class ROI: def", "end): self.start = start self.end = end self.len = end", "end self.len = end - start + 1 def valid(self):", "return spectrum[self.start:self.end+1] def contains(self, value): return self.start <= value <=", "start, end): self.start = start self.end = end self.len =", "class encapsulates a Region Of Interest, which may be either", "be either horizontal # (pixels) or vertical (rows/lines). class ROI:", "0 and self.start < self.end def crop(self, spectrum): return spectrum[self.start:self.end+1]", "self.start >= 0 and self.start < self.end def crop(self, spectrum):", "start + 1 def valid(self): return self.start >= 0 and", "end - start + 1 def valid(self): return self.start >=", ">= 0 and self.start < self.end def crop(self, spectrum): return", "vertical (rows/lines). class ROI: def __init__(self, start, end): self.start =", "return self.start >= 0 and self.start < self.end def crop(self,", "spectrum[self.start:self.end+1] def contains(self, value): return self.start <= value <= self.end", "may be either horizontal # (pixels) or vertical (rows/lines). class", "horizontal # (pixels) or vertical (rows/lines). class ROI: def __init__(self,", "crop(self, spectrum): return spectrum[self.start:self.end+1] def contains(self, value): return self.start <=", "Interest, which may be either horizontal # (pixels) or vertical", "(rows/lines). 
class ROI: def __init__(self, start, end): self.start = start", "__init__(self, start, end): self.start = start self.end = end self.len", "def __init__(self, start, end): self.start = start self.end = end", "self.end def crop(self, spectrum): return spectrum[self.start:self.end+1] def contains(self, value): return", "= end self.len = end - start + 1 def", "This class encapsulates a Region Of Interest, which may be" ]
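A brief usage sketch (illustrative, not from the original file): a horizontal ROI selecting pixels 2 through 5 of a spectrum.

roi = ROI(2, 5)                                # pixels 2..5, inclusive
assert roi.valid() and roi.len == 4
assert roi.contains(3)
spectrum = [10, 11, 12, 13, 14, 15, 16, 17]
assert roi.crop(spectrum) == [12, 13, 14, 15]  # end index is inclusive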
[ "0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC,", "0xF0, 0xFA, 0x0F, 0xC6, 0x3F, 0x9F, 0xEC, 0x7E, 0x00, 0x00,", "A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x04,", "0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0xE7, 0x9E, 0x78, 0x7C, 0xE2, 0xC9, 0x2C, 0x81, 0xCC, 0xD2,", "be # included in all copies or substantial portions of", "0xDF, 0xE1, 0x9F, 0xEC, 0x7E, 0xE6, 0x73, 0x9C, 0xE7, 0x39,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x0F, 0x00, 0x01, 0xE0,", "0xE0, 0x0F, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "permission notice shall be # included in all copies or", "upmLCD.SSD1327(0, 0x3C); logoArr = [0x00, 0x00, 0x00, 0x00, 0x00, 0x20,", "0x00, 0x6C, 0xF3, 0xCF, 0x70, 0x9E, 0x79, 0xE7, 0x80, 0x00,", "0x80, 0x01, 0xC0, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x01, 0xC0, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x7E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7F,", "0xC9, 0x2C, 0x81, 0xCC, 0xD2, 0x40, 0xFB, 0x21, 0xB2, 0x48,", "KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO", "0x00, 0x00, 0x07, 0x80, 0x01, 0xC0, 0x08, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xC0, 0x07, 0x00, 0x00,", "charge, to any person obtaining # a copy of this", "#!/usr/bin/python # Author: <NAME> <<EMAIL>> # Copyright (c) 2015 Intel", "0x00, 0xFC, 0x30, 0x18, 0x7E, 0x00, 0x00, 0x00, 0x00, 0x00,", "AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM,", "0x00, 0x00, 0x00, 0x00, 0x0F, 0x87, 0xC0, 0x07, 0xC1, 0xF0,", "0x00, 0x00, 0x07, 0x80, 0x03, 0xC0, 0x00, 0x00, 0x00, 0x00,", "0xB3, 0x79, 0x27, 0x80, 0x87, 0x9E, 0x40, 0x8D, 0xE7, 0x9E,", "shall be # included in all copies or substantial portions", "0x48, 0x40, 0xE2, 0xC9, 0x2C, 0x80, 0x84, 0xD2, 0x40, 0x8B,", "0x00, 0x00, 0xF8, 0x3E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x73, 0x9C, 0xE7, 0x39, 0xCE, 0x1C, 0xDF, 0xE1, 0xB9, 0xEC,", "# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF", "0x7D, 0x9E, 0x68, 0x20, 0xB2, 0xC8, 0x64, 0x00, 0x00, 0x00,", "0x00, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x80, 0x03, 0xE0, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x20, 0xB2, 0xCB, 0x60, 0x80, 0x00, 0x00, 0x00, 0x00, 0x7C,", "sell copies of the Software, and to # permit persons", "0x79, 0xE7, 0x81, 0xC7, 0x92, 0x70, 0x89, 0xE7, 0x9E, 0x78,", "0xE0, 0x61, 0xD8, 0x66, 0x1B, 0x86, 0x1C, 0x06, 0x61, 0xB0,", "portions of the Software. 
# # THE SOFTWARE IS PROVIDED", "LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A", "0xE7, 0xF9, 0xFE, 0x1F, 0xE6, 0x3F, 0x9F, 0xEC, 0xFE, 0x7E,", "0x46, 0x9A, 0x61, 0x20, 0xB2, 0xCB, 0x60, 0x80, 0x00, 0x00,", "0x00, 0x07, 0x80, 0x01, 0xC0, 0x08, 0x00, 0x00, 0x00, 0x00,", "0x07, 0x0F, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,", "sys import pyupm_i2clcd as upmLCD myLCD = upmLCD.SSD1327(0, 0x3C); logoArr", "0x00, 0x1E, 0x07, 0x80, 0x03, 0xE0, 0xF0, 0x00, 0x00, 0x00,", "WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND", "0xEC, 0x7E, 0xE6, 0x73, 0x9C, 0xE7, 0x39, 0xCE, 0x1C, 0xDF,", "# # The above copyright notice and this permission notice", "publish, # distribute, sublicense, and/or sell copies of the Software,", "0x0F, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "as upmLCD myLCD = upmLCD.SSD1327(0, 0x3C); logoArr = [0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x30, 0x18,", "0x7C, 0xF3, 0xCF, 0x30, 0x9E, 0x79, 0xE7, 0x90, 0x00, 0x00,", "TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR", "0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,", "0x61, 0xD8, 0x66, 0x1B, 0x86, 0x1C, 0x06, 0x61, 0xB0, 0x6D,", "0x3F, 0x9F, 0xEC, 0xFE, 0x7E, 0x3F, 0x0F, 0xC3, 0xF0, 0xFA,", "Copyright (c) 2015 Intel Corporation. # # Permission is hereby", "use, copy, modify, merge, publish, # distribute, sublicense, and/or sell", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF8, 0x3E,", "0xE7, 0x81, 0xC7, 0x92, 0x70, 0x89, 0xE7, 0x9E, 0x78, 0x7C,", "THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE", "in range(len(logoArr)): SeeedLogo.__setitem__(x, logoArr[x]) # If you don't set the", "NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS", "0x02, 0x00, 0x00, 0x00, 0x00, 0x6C, 0x00, 0x00, 0x00, 0x00,", "0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x1E, 0x07, 0x80, 0x03, 0xE0,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x06, 0x00,", "0x00, 0x00, 0x0F, 0x87, 0xC0, 0x07, 0xC1, 0xF0, 0x00, 0x00,", "# NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT", "0x0F, 0x00, 0x01, 0xE0, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00,", "modify, merge, publish, # distribute, sublicense, and/or sell copies of", "0x61, 0xB0, 0x6D, 0xC3, 0x7C, 0x7F, 0xFF, 0xFF, 0xFF, 0x06,", "0x9F, 0xEC, 0x7E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "OR IN CONNECTION # WITH THE SOFTWARE OR THE USE", "0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x80, 0x03, 0xC0, 0x00,", "0x0F, 0x87, 0xC0, 0x07, 0xC1, 0xF0, 0x00, 0x00, 0x00, 0x00,", "import time, signal, sys import pyupm_i2clcd as upmLCD myLCD =", "0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x80, 0x01, 0xC0, 0x08,", "0x00, 0x00, 0x00, 0x6C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "to # the following conditions: # # The above copyright", "0x07, 0xE1, 0xE0, 0x07, 0x0F, 0xC0, 0x00, 0x00, 0x00, 0x00,", "0x87, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xE1,", "0xE1, 0xE0, 0x07, 0x0F, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x6D, 0xC3, 0x7C, 0x7F, 0xFF, 0xFF, 0xFF, 0x06, 0x0F, 0x86,", "0x9E, 0x40, 0x8D, 0xE7, 0x9E, 0x48, 0x00, 0x00, 0x00, 0x00,", "without restriction, including # without limitation the rights to use,", "0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xC3, 0xC0,", "appear jagged myLCD.setGrayLevel(12) myLCD.draw(SeeedLogo, 96 * 96 / 8); for", "0x80, 0x01, 0xE0, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "sublicense, and/or sell copies of the Software, and to #", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x04, 0x00,", "0x07, 0x80, 0x03, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x40, 0xE2, 0xC9, 0x2C, 0x80, 0x84, 0xD2, 0x40, 0x8B, 0x2D,", "0xB0, 0x6D, 0xC3, 0xFE, 0x7F, 0x9F, 0xE7, 0xF9, 0xFE, 0x1F,", "0xE0, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3C, 0x0F,", "0x00, 0x00, 0x00, 0x01, 0xF8, 0x70, 0x1C, 0x3F, 0x00, 0x00,", "myLCD = upmLCD.SSD1327(0, 0x3C); logoArr = [0x00, 0x00, 0x00, 0x00,", "0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x38, 0x0F, 0x80, 0x01, 0xE0, 0x38, 0x00, 0x00, 0x00, 0x00,", "0x9C, 0xE7, 0x39, 0xCE, 0x1C, 0xDF, 0xE1, 0xB9, 0xEC, 0xE7,", "0xFF, 0xFF, 0x06, 0x07, 0xC6, 0x61, 0xB0, 0x6D, 0x83, 0xC3,", "0x61, 0xB0, 0x6D, 0x83, 0xC3, 0x61, 0x18, 0x46, 0x03, 0x86,", "0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x0F, 0xC3, 0xC0, 0x07, 0x87, 0xE0, 0x00, 0x00, 0x00,", "0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "display to be white, the seeed logo will appear jagged", "0x80, 0x00, 0x00, 0x00, 0x00, 0x7D, 0x9E, 0x68, 0x20, 0xB2,", "persons to whom the Software is furnished to do so,", "IN THE SOFTWARE. # Load i2clcd display module import time,", "0xC3, 0xF0, 0xFA, 0x0F, 0xDF, 0xE1, 0x9F, 0xEC, 0x7E, 0xE6,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x80, 0x03,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00,", "(c) 2015 Intel Corporation. # # Permission is hereby granted,", "0x00, 0x00, 0x00] SeeedLogo = upmLCD.uint8Array(len(logoArr)) for x in range(len(logoArr)):", "0x78, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1E, 0x07, 0x80,", "0x00, 0x00, 0x00, 0x00, 0x47, 0x9E, 0x6F, 0x20, 0xB2, 0xF9,", "0x00, 0xF8, 0x00, 0x00, 0x40, 0x40, 0x02, 0x00, 0x00, 0x83,", "0xFE, 0x7F, 0x9F, 0xE7, 0xF9, 0xFE, 0x1F, 0xE6, 0x3F, 0x9F,", "the Software. 
# # THE SOFTWARE IS PROVIDED \"AS IS\",", "0x82, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6C, 0xF3,", "0x00, 0x00, 0x30, 0x0F, 0x00, 0x01, 0xE0, 0x18, 0x00, 0x00,", "0x00, 0x00, 0x02, 0x00, 0x06, 0x00, 0x00, 0x60, 0x00, 0x7E,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xE0, 0x0F, 0x80,", "0x00, 0x00, 0x30, 0x0F, 0x00, 0x01, 0xE0, 0x08, 0x00, 0x00,", "0x00, 0x38, 0x0F, 0x00, 0x01, 0xE0, 0x38, 0x00, 0x00, 0x00,", "THE USE OR OTHER DEALINGS IN THE SOFTWARE. # Load", "0xC9, 0x2C, 0x80, 0x84, 0xD2, 0x40, 0x8B, 0x2D, 0x92, 0x48,", "0x02, 0x00, 0x00, 0x82, 0x60, 0x00, 0x00, 0xF8, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x7C, 0x02, 0x00, 0x00, 0x82, 0x60, 0x00,", "0x7C, 0x7F, 0xFF, 0xFF, 0xFF, 0x06, 0x0F, 0x86, 0x61, 0xB0,", "0x00, 0x00, 0x00, 0x00, 0x03, 0xE0, 0x0F, 0x80, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x80, 0x03, 0xC0,", "0x1E, 0x07, 0x80, 0x03, 0xE0, 0xF8, 0x00, 0x00, 0x00, 0x00,", "all copies or substantial portions of the Software. # #", "0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0x83, 0xC0,", "0x18, 0x7E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x9E, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x0F, 0xC3, 0xC0, 0x07, 0x87, 0xE0, 0x00, 0x00,", "0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x20, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "will appear jagged myLCD.setGrayLevel(12) myLCD.draw(SeeedLogo, 96 * 96 / 8);", "0x01, 0xF8, 0xF0, 0x0E, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00,", "including # without limitation the rights to use, copy, modify,", "0x20, 0x82, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6C,", "# without limitation the rights to use, copy, modify, merge,", "# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF", "0x03, 0xE0, 0x78, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1E,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x1F, 0x88, 0x21, 0xF0, 0x00,", "0x0F, 0xC3, 0xC0, 0x07, 0x87, 0xE0, 0x00, 0x00, 0x00, 0x00,", "0xC1, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0x83,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7F, 0x18, 0x30, 0xFC,", "0x00, 0x00, 0x00, 0x00, 0x0F, 0xC4, 0x47, 0xE0, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7F, 0x18, 0x30,", "0x02, 0x00, 0x06, 0x00, 0x00, 0x60, 0x00, 0x7E, 0x3F, 0x0F,", "0x00, 0x00, 0x00, 0x00, 0x7C, 0x02, 0x00, 0x00, 0x82, 0x60,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00, 0x20,", "# Copyright (c) 2015 Intel Corporation. 
# # Permission is", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,", "0x00, 0x7E, 0x3F, 0x0F, 0xC3, 0xF0, 0xFA, 0x0F, 0xDF, 0xE1,", "included in all copies or substantial portions of the Software.", "0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x82, 0x60, 0x00, 0x00, 0xF8, 0x00, 0x00, 0x40, 0x40, 0x02,", "0x3F, 0x9F, 0xEC, 0x7E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x0F, 0x00, 0x01, 0xE0, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x18, 0x30, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x7E, 0xE6, 0x73, 0x9C, 0xE7, 0x39, 0xCE, 0x1C, 0xDF, 0xE1,", "0x8C, 0xD2, 0x40, 0x8B, 0xE7, 0xB0, 0x48, 0x40, 0xE2, 0xC9,", "0xB2, 0xCB, 0x60, 0x80, 0x00, 0x00, 0x00, 0x00, 0x7C, 0xF3,", "to do so, subject to # the following conditions: #", "jagged myLCD.setGrayLevel(12) myLCD.draw(SeeedLogo, 96 * 96 / 8); for i", "0x0F, 0x00, 0x01, 0xE0, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x7F, 0x18, 0x30, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "notice shall be # included in all copies or substantial", "0x00, 0x20, 0x0F, 0x80, 0x01, 0xE0, 0x08, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0E, 0xE0,", "Software without restriction, including # without limitation the rights to", "0x20, 0x07, 0x80, 0x01, 0xE0, 0x08, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x03, 0xE0, 0x0F, 0x80, 0x00, 0x00, 0x00, 0x00,", "0x8C, 0x00, 0x00, 0x40, 0x60, 0xB7, 0x79, 0xE7, 0x81, 0xC7,", "0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x20, 0x07, 0x80, 0x01, 0xE0, 0x08, 0x00, 0x00,", "0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x20, 0x07, 0x80, 0x01, 0xE0, 0x08, 0x00, 0x00, 0x00,", "(the # \"Software\"), to deal in the Software without restriction,", "# The above copyright notice and this permission notice shall", "0x01, 0xE0, 0x78, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3E,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] SeeedLogo", "0x92, 0x48, 0x7D, 0xB3, 0x79, 0x27, 0x80, 0x87, 0x9E, 0x40,", "0x00, 0x00, 0x00, 0x0E, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x01, 0xF8, 0x70, 0x1C, 0x3F, 0x00,", "in the Software without restriction, including # without limitation the", "# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT.", "0x02, 0x00, 0x00, 0x83, 0x60, 0x00, 0x00, 0x8C, 0x00, 0x00,", "EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES", "0x00, 0x00, 0x00, 0x46, 0x9A, 0x61, 0x20, 0xB2, 0xCB, 0x60,", "0x3C); logoArr = [0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x08,", "0x00, 0x00, 0x00, 0x00, 0x46, 0x9A, 0x61, 0x20, 0xB2, 0xCB,", "IS\", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED,", "0x00, 0x00, 0x00, 0x00, 0x0E, 0xE0, 0x00, 0x00, 0x00, 0x00,", "of this software and associated documentation files (the # \"Software\"),", "subject to # the following conditions: # # The above", "OTHER DEALINGS IN THE SOFTWARE. 
# Load i2clcd display module", "0xD8, 0x66, 0x1B, 0x86, 0x1C, 0x06, 0x61, 0xB0, 0x6D, 0xC3,", "0x2D, 0x92, 0x48, 0x7D, 0xB3, 0x79, 0x27, 0x80, 0x87, 0x9E,", "0x00, 0x00, 0x00, 0x00, 0x0F, 0x83, 0xC0, 0x07, 0x83, 0xE0,", "0x00, 0x01, 0xC0, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07,", "0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x6C, 0x00, 0x00,", "0xF8, 0xF0, 0x0E, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,", "to use, copy, modify, merge, publish, # distribute, sublicense, and/or", "following conditions: # # The above copyright notice and this", "0x00, 0x00, 0x00, 0x38, 0x0F, 0x00, 0x01, 0xE0, 0x38, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x03, 0xF0, 0xE0, 0x0F, 0x0F, 0x80,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xC0, 0x06, 0x00,", "conditions: # # The above copyright notice and this permission", "0x00, 0x00, 0x00, 0x00, 0x00, 0x3C, 0x0F, 0x80, 0x01, 0xE0,", "0x00] SeeedLogo = upmLCD.uint8Array(len(logoArr)) for x in range(len(logoArr)): SeeedLogo.__setitem__(x, logoArr[x])", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xC0, 0x06,", "0x00, 0x00, 0xC0, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,", "0x0F, 0xC3, 0xF0, 0xFA, 0x0F, 0xDF, 0xE1, 0x9F, 0xEC, 0x7E,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xE0, 0x0F,", "0x00, 0x00, 0x20, 0x82, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x62, 0xF9, 0x2C, 0x80, 0x8C, 0xD2, 0x40, 0x8B, 0xE7, 0xB0,", "0x0F, 0x80, 0x01, 0xE0, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x7F, 0x18, 0x30, 0xFC, 0x00,", "0x00, 0x00, 0x00, 0xF8, 0x3E, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x6C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x06,", "0x39, 0xCE, 0x1C, 0xDF, 0xE1, 0xB9, 0xEC, 0xE7, 0xE0, 0x61,", "If you don't set the display to be white, the", "Author: <NAME> <<EMAIL>> # Copyright (c) 2015 Intel Corporation. 
#", "0x79, 0x27, 0x80, 0x87, 0x9E, 0x40, 0x8D, 0xE7, 0x9E, 0x48,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xE0, 0x0F, 0x80, 0x00,", "THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY", "0x00, 0x00, 0x83, 0x60, 0x00, 0x00, 0x8C, 0x00, 0x00, 0x40,", "0xD2, 0x40, 0x8B, 0xE7, 0xB0, 0x48, 0x40, 0xE2, 0xC9, 0x2C,", "0x6D, 0x83, 0xC3, 0x61, 0x18, 0x46, 0x03, 0x86, 0x18, 0x66,", "0xC6, 0x3F, 0x9F, 0xEC, 0x7E, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x79, 0xE7, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00, 0x20, 0x82,", "to be white, the seeed logo will appear jagged myLCD.setGrayLevel(12)", "obtaining # a copy of this software and associated documentation", "0x00, 0x01, 0xE0, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x03, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x0F, 0x80,", "0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x9E, 0x6F, 0x20,", "* 96 / 8); for i in range(12): myLCD.setCursor(i, 0)", "0x66, 0x1B, 0x86, 0x1C, 0x06, 0x61, 0xB0, 0x6D, 0xC3, 0x7C,", "= upmLCD.SSD1327(0, 0x3C); logoArr = [0x00, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x6C,", "upmLCD.uint8Array(len(logoArr)) for x in range(len(logoArr)): SeeedLogo.__setitem__(x, logoArr[x]) # If you", "LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR", "0xEC, 0xE7, 0xE0, 0x61, 0xD8, 0x66, 0x1B, 0x86, 0x1C, 0x06,", "0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "free of charge, to any person obtaining # a copy", "0x87, 0x9E, 0x40, 0x8D, 0xE7, 0x9E, 0x48, 0x00, 0x00, 0x00,", "PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, # EXPRESS", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,", "copy of this software and associated documentation files (the #", "0x00, 0x00, 0x00, 0x00, 0xFC, 0x30, 0x18, 0x7E, 0x00, 0x00,", "0xCB, 0x60, 0x80, 0x00, 0x00, 0x00, 0x00, 0x7C, 0xF3, 0xCF,", "copy, modify, merge, publish, # distribute, sublicense, and/or sell copies", "0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x9E, 0x6F, 0x20, 0xB2,", "0x87, 0xC0, 0x07, 0xC1, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1F, 0x88,", "0x79, 0xE7, 0x80, 0x00, 0x00, 0x00, 0x00, 0x7D, 0x9E, 0x68,", "0x07, 0x80, 0x01, 0xC0, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,", "0xCC, 0xD2, 0x40, 0xFB, 0x21, 0xB2, 0x48, 0x40, 0x62, 0xF9,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1E, 0x07, 0x80, 0x03,", "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF #", "is hereby granted, free of charge, to any person obtaining", "0x00, 0x01, 0xE0, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0xF9, 0xFE, 0x1F, 0xE6, 0x3F, 0x9F, 0xEC, 0xFE, 0x7E, 0x3F,", "0x20, 0x82, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44,", "0x00, 0x83, 0x60, 0x00, 0x00, 0x8C, 0x00, 0x00, 0x40, 0x60,", "0x00, 0x00, 0x38, 0x0F, 0x80, 0x01, 0xE0, 0x38, 0x00, 0x00,", "0xE0, 0x07, 0x0F, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "of charge, to any person obtaining # a copy of", "above copyright notice and this permission notice shall be #", "0x80, 0x01, 0xE0, 0x78, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0xF8, 0x70, 0x1C, 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x07, 0x80,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x04, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x1E, 0x07, 0x80, 0x03, 0xE0, 0xF0, 0x00,", "0xC3, 0xF0, 0xFA, 0x0F, 0xC6, 0x3F, 0x9F, 0xEC, 0x7E, 0x00,", "0xF9, 0xE7, 0x80, 0x00, 0x00, 0x00, 
0x00, 0x46, 0x9A, 0x61,", "0x00, 0x00, 0x7C, 0x00, 0x00, 0x20, 0x82, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x6C, 0xF3, 0xCF, 0x70, 0x9E, 0x79, 0xE7,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,", "0x01, 0xE0, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38,", "0x00, 0x38, 0x0F, 0x80, 0x01, 0xE0, 0x38, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x0E, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x47, 0x9E, 0x6F, 0x20, 0xB2, 0xF9, 0xE7, 0x80, 0x00,", "# # Permission is hereby granted, free of charge, to", "0x00, 0x00, 0x00, 0x00, 0x20, 0x0F, 0x80, 0x01, 0xE0, 0x08,", "DEALINGS IN THE SOFTWARE. # Load i2clcd display module import", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7C, 0x00, 0x00, 0x20,", "0x00, 0x3C, 0x0F, 0x80, 0x01, 0xE0, 0x78, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x7C, 0x00, 0x00, 0x20, 0x82, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x1F, 0x07, 0x80, 0x03, 0xC1, 0xF0,", "OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND #", "permit persons to whom the Software is furnished to do", "copies of the Software, and to # permit persons to", "import pyupm_i2clcd as upmLCD myLCD = upmLCD.SSD1327(0, 0x3C); logoArr =", "0x0F, 0x86, 0x61, 0xB0, 0x6D, 0x83, 0x3E, 0x7F, 0xFF, 0xFF,", "0x60, 0x00, 0x00, 0xF8, 0x00, 0x00, 0x40, 0x40, 0x02, 0x00,", "0x1F, 0x88, 0x21, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x02, 0x00, 0x06, 0x00, 0x00, 0x60, 0x00, 0x7E, 0x3F,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x0F, 0x80, 0x01, 0xE0,", "0x00, 0x00, 0x00, 0x00, 0x38, 0x0F, 0x80, 0x01, 0xE0, 0x38,", "0x1C, 0x06, 0x61, 0xB0, 0x6D, 0xC3, 0x7C, 0x7F, 0xFF, 0xFF,", "WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN", "0x81, 0xC7, 0x92, 0x70, 0x89, 0xE7, 0x9E, 0x78, 0x7C, 0xE2,", "0x00, 0x00, 0x00, 0x07, 0x80, 0x01, 0xC0, 0x08, 0x00, 0x00,", "0xE1, 0x9F, 0xEC, 0x7E, 0xE6, 0x73, 0x9C, 0xE7, 0x39, 0xCE,", "ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT", "NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE #", "0x00, 0x03, 0xE0, 0x0F, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,", "THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE", "0x00, 0x00, 0x00, 0x00] SeeedLogo = upmLCD.uint8Array(len(logoArr)) for x in", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xC0, 0x06, 0x00, 0x00,", "SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR", "# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS", "0x0E, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,", "0x06, 0x00, 0x00, 0x6C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x86, 0x1C, 0x06, 0x61, 0xB0, 0x6D, 0xC3, 0x7C, 0x7F, 0xFF,", "and associated documentation files (the # \"Software\"), to deal in", "0x01, 0xE0, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38,", "or substantial portions of the Software. 
# # THE SOFTWARE", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1F, 0x88, 0x21,", "0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x0F, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x7C, 0x00, 0x00, 0x20, 0x82,", "0xF9, 0x2C, 0x80, 0x8C, 0xD2, 0x40, 0x8B, 0xE7, 0xB0, 0x48,", "Load i2clcd display module import time, signal, sys import pyupm_i2clcd", "= [0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x08, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x7C, 0x02, 0x00, 0x00, 0x82,", "OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT", "# Author: <NAME> <<EMAIL>> # Copyright (c) 2015 Intel Corporation.", "0x00, 0x00, 0x00, 0x00, 0x00, 0x3E, 0x0F, 0x80, 0x03, 0xE0,", "0x9F, 0xEC, 0x7E, 0xE6, 0x73, 0x9C, 0xE7, 0x39, 0xCE, 0x1C,", "0xE0, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1E, 0x07,", "0xE0, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x0F,", "0x00, 0x00, 0x1E, 0x07, 0x80, 0x03, 0xE0, 0xF0, 0x00, 0x00,", "0x83, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xC3,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x30, 0x18, 0x7E,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3C, 0x0F, 0x80, 0x01,", "0x00, 0x00, 0x02, 0x00, 0x06, 0x00, 0x00, 0x6C, 0x00, 0x00,", "0x03, 0x86, 0x18, 0x66, 0x61, 0xB0, 0x6D, 0xC3, 0xFE, 0x7F,", "0x61, 0x20, 0xB2, 0xCB, 0x60, 0x80, 0x00, 0x00, 0x00, 0x00,", "0x80, 0x8C, 0xD2, 0x40, 0x8B, 0xE7, 0xB0, 0x48, 0x40, 0xE2,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xC3, 0xC0, 0x07, 0x87,", "0xB0, 0x6D, 0x83, 0xC3, 0x61, 0x18, 0x46, 0x03, 0x86, 0x18,", "0x07, 0xC6, 0x61, 0xB0, 0x6D, 0x83, 0xC3, 0x61, 0x18, 0x46,", "0xEC, 0xFE, 0x7E, 0x3F, 0x0F, 0xC3, 0xF0, 0xFA, 0x0F, 0xC6,", "0xDF, 0xE1, 0xB9, 0xEC, 0xE7, 0xE0, 0x61, 0xD8, 0x66, 0x1B,", "0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xE1, 0xE0,", "0x00, 0x00, 0x00, 0x3C, 0x0F, 0x80, 0x01, 0xE0, 0x78, 0x00,", "# Permission is hereby granted, free of charge, to any", "0x46, 0x03, 0x86, 0x18, 0x66, 0x61, 0xB0, 0x6D, 0xC3, 0xFE,", "0x6F, 0x20, 0xB2, 0xF9, 0xE7, 0x80, 0x00, 0x00, 0x00, 0x00,", "0x48, 0x7D, 0xB3, 0x79, 0x27, 0x80, 0x87, 0x9E, 0x40, 0x8D,", "0x00, 0x00, 0x00, 0x00, 0x00, 0xC0, 0x06, 0x00, 0x00, 0x00,", "0xE0, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x0F,", "0xF0, 0xE0, 0x0F, 0x0F, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,", "the Software is furnished to do so, subject to #", "0xE0, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x0F,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xF8, 0x70, 0x1C, 0x3F,", "# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY", "Corporation. # # Permission is hereby granted, free of charge,", "0x00, 0x00, 0x00, 0x00, 0x1E, 0x07, 0x80, 0x03, 0xE0, 0xF0,", "0x9E, 0x79, 0xE7, 0x80, 0x00, 0x00, 0x00, 0x00, 0x7D, 0x9E,", "0x00, 0x1F, 0x88, 0x21, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x9A, 0x61, 0x20, 0xB2, 0xCB, 0x60, 0x80, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7C, 0x02,", "0x8D, 0xE7, 0x9E, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x01, 0xF8, 0xF0, 0x0E, 0x1F, 0x80, 0x00,", "0x00, 0x00, 0x82, 0x60, 0x00, 0x00, 0xF8, 0x00, 0x00, 0x40,", "without limitation the rights to use, copy, modify, merge, publish,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03,", "0x06, 0x00, 0x00, 0x60, 0x00, 0x7E, 0x3F, 0x0F, 0xC3, 0xF0,", "0x47, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x0F, 0x0F, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,", "0x66, 0x61, 0xB0, 0x6D, 0xC3, 0xFE, 0x7F, 0x9F, 0xE7, 0xF9,", "0xF8, 0x00, 0x00, 0x40, 0x40, 0x02, 0x00, 0x00, 0x83, 0x60,", "OR OTHER DEALINGS IN THE SOFTWARE. 
# Load i2clcd display", "# Load i2clcd display module import time, signal, sys import", "0x18, 0x46, 0x03, 0x86, 0x18, 0x66, 0x61, 0xB0, 0x6D, 0xC3,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xC0, 0x07, 0x00,", "0xFC, 0x30, 0x18, 0x7E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0xF8, 0x3E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x8B, 0x2D, 0x92, 0x48, 0x7D, 0xB3, 0x79, 0x27, 0x80, 0x87,", "0x00, 0x00, 0x3E, 0x0F, 0x80, 0x03, 0xE0, 0x78, 0x00, 0x00,", "0x3F, 0x0F, 0xC3, 0xF0, 0xFA, 0x0F, 0xC6, 0x3F, 0x9F, 0xEC,", "0x84, 0xD2, 0x40, 0x8B, 0x2D, 0x92, 0x48, 0x7D, 0xB3, 0x79,", "0x00, 0x00, 0x00, 0x20, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,", "0xE0, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x0F,", "0x00, 0x00, 0x00, 0x00, 0x30, 0x0F, 0x00, 0x01, 0xE0, 0x18,", "0x07, 0x80, 0x03, 0xE0, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00,", "merge, publish, # distribute, sublicense, and/or sell copies of the", "0x00, 0x00, 0x00, 0x0F, 0x87, 0xC0, 0x07, 0xC1, 0xF0, 0x00,", "limitation the rights to use, copy, modify, merge, publish, #", "0x00, 0x07, 0xE1, 0xE0, 0x07, 0x0F, 0xC0, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xC4, 0x47,", "0xE7, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,", "0x40, 0x8B, 0x2D, 0x92, 0x48, 0x7D, 0xB3, 0x79, 0x27, 0x80,", "0x00, 0x82, 0x60, 0x00, 0x00, 0xF8, 0x00, 0x00, 0x40, 0x40,", "OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF", "0x82, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x00,", "PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS", "0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07,", "so, subject to # the following conditions: # # The", "0xC0, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x30, 0x0F, 0x00, 0x01, 0xE0, 0x08, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00,", "0x7F, 0xFF, 0xFF, 0xFF, 0x06, 0x0F, 0x86, 0x61, 0xB0, 0x6D,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x06, 0x00, 0x00,", "FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO", "0x7E, 0x3F, 0x0F, 0xC3, 0xF0, 0xFA, 0x0F, 0xC6, 0x3F, 0x9F,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xE1, 0xE0, 0x07,", "do so, subject to # the following conditions: # #", "0x7C, 0xE2, 0xC9, 0x2C, 0x81, 0xCC, 0xD2, 0x40, 0xFB, 0x21,", "0x00, 0x3E, 0x0F, 0x80, 0x03, 0xE0, 0x78, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x1F, 0x88, 0x21, 0xF0, 0x00, 0x00, 0x00, 0x00,", "0x1E, 0x07, 0x80, 0x03, 0xE0, 0xF0, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x46, 0x9A, 0x61, 0x20, 0xB2, 0xCB, 0x60, 0x80, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]", "0x38, 0x0F, 0x00, 0x01, 0xE0, 0x38, 0x00, 0x00, 0x00, 0x00,", "the rights to use, copy, modify, merge, publish, # distribute,", "0x00, 0x00, 0x00, 0x20, 0x07, 0x80, 0x01, 0xE0, 0x08, 0x00,", "0x00, 0x00, 0x00, 0x44, 0x00, 0x00, 0x20, 0x82, 0x00, 0x00,", "0x00, 0x00, 0x38, 0x0F, 0x00, 0x01, 0xE0, 0x38, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x07, 0xE1, 0xE0, 0x07, 0x0F, 0xC0,", "0x7E, 0x3F, 0x0F, 0xC3, 0xF0, 0xFA, 0x0F, 0xDF, 0xE1, 0x9F,", "# If you don't set the display to be white,", "WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING", "0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xF0, 0xE0,", "Software, and to # permit persons to whom the Software", "i in range(12): myLCD.setCursor(i, 0) myLCD.setGrayLevel(i) myLCD.write('Hello World') print \"Exiting\"", "SOFTWARE. 
# Load i2clcd display module import time, signal, sys", "the following conditions: # # The above copyright notice and", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x80,", "Intel Corporation. # # Permission is hereby granted, free of", "0xE0, 0x78, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3E, 0x0F,", "0xE0, 0x78, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1E, 0x07,", "0x00, 0x00, 0x00, 0x38, 0x0F, 0x80, 0x01, 0xE0, 0x38, 0x00,", "0x0E, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "distribute, sublicense, and/or sell copies of the Software, and to", "0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x0F, 0x80, 0x01, 0xE0,", "0x83, 0x60, 0x00, 0x00, 0x8C, 0x00, 0x00, 0x40, 0x60, 0xB7,", "0xFF, 0xFF, 0xFF, 0x06, 0x0F, 0x86, 0x61, 0xB0, 0x6D, 0x83,", "0xE0, 0x0F, 0x0F, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "# \"Software\"), to deal in the Software without restriction, including", "MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN", "0x00, 0x00, 0xFC, 0x30, 0x18, 0x7E, 0x00, 0x00, 0x00, 0x00,", "range(len(logoArr)): SeeedLogo.__setitem__(x, logoArr[x]) # If you don't set the display", "0xC0, 0x07, 0x83, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xC3, 0xC0, 0x07,", "SeeedLogo.__setitem__(x, logoArr[x]) # If you don't set the display to", "0xE7, 0x80, 0x00, 0x00, 0x00, 0x00, 0x46, 0x9A, 0x61, 0x20,", "be white, the seeed logo will appear jagged myLCD.setGrayLevel(12) myLCD.draw(SeeedLogo,", "this permission notice shall be # included in all copies", "0xE6, 0x73, 0x9C, 0xE7, 0x39, 0xCE, 0x1C, 0xDF, 0xE1, 0xB9,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xC0,", "0x00, 0x00, 0x00, 0x0F, 0xC3, 0xC0, 0x07, 0x87, 0xE0, 0x00,", "0x89, 0xE7, 0x9E, 0x78, 0x7C, 0xE2, 0xC9, 0x2C, 0x81, 0xCC,", "0x00, 0x00, 0x20, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x3E, 0x7F, 0xFF, 0xFF, 0xFF, 0x06, 0x07, 0xC6, 0x61, 0xB0,", "0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00,", "0x00, 0x00, 0x01, 0xF8, 0x70, 0x1C, 0x3F, 0x00, 0x00, 0x00,", "0x00, 0x6C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,", "0x0F, 0x83, 0xC0, 0x07, 0x83, 0xE0, 0x00, 0x00, 0x00, 0x00,", "0x07, 0x83, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F,", "0x80, 0x00, 0x00, 0x00, 0x00, 0x46, 0x9A, 0x61, 0x20, 0xB2,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,", "0x00, 0x00, 0x00, 0x00, 0x01, 0xF8, 0xF0, 0x0E, 0x1F, 0x80,", "0xC3, 0x61, 0x18, 0x46, 0x03, 0x86, 0x18, 0x66, 0x61, 0xB0,", "0x00, 0x00, 0x00, 0x00, 0x1E, 0x07, 0x80, 0x03, 0xE0, 0xF8,", "0x60, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "to whom the Software is furnished to do so, subject", "0x00, 0x00, 0x60, 0x00, 0x7E, 0x3F, 0x0F, 0xC3, 0xF0, 0xFA,", "0x00, 0x0F, 0xC4, 0x47, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x3C, 0x0F, 0x80, 0x01, 0xE0, 0x78,", "0x00, 0x38, 0x0F, 0x00, 0x01, 0xE0, 0x18, 0x00, 0x00, 0x00,", "0x00, 0x0F, 0x83, 0xC0, 0x07, 0x83, 0xE0, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7C, 0x02, 0x00, 0x00,", "0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1F, 0x07, 0x80,", "notice and this permission notice shall be # included in", "0xF0, 0x0E, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "OF OR IN CONNECTION # WITH THE SOFTWARE OR THE", "WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT", "0x00, 0x00, 0x20, 0x0F, 0x80, 0x01, 0xE0, 0x08, 0x00, 0x00,", "0x00, 0x00, 0x0F, 0xC4, 0x47, 0xE0, 0x00, 0x00, 0x00, 0x00,", "0x70, 0x89, 0xE7, 0x9E, 0x78, 0x7C, 0xE2, 0xC9, 0x2C, 0x81,", "# a copy of this 
software and associated documentation files", "0x00, 0x00, 0x00, 0x0F, 0xC4, 0x47, 0xE0, 0x00, 0x00, 0x00,", "THE SOFTWARE. # Load i2clcd display module import time, signal,", "0x00, 0x00, 0x00, 0x0F, 0x83, 0xC0, 0x07, 0x83, 0xE0, 0x00,", "0x6D, 0xC3, 0xFE, 0x7F, 0x9F, 0xE7, 0xF9, 0xFE, 0x1F, 0xE6,", "0xE2, 0xC9, 0x2C, 0x80, 0x84, 0xD2, 0x40, 0x8B, 0x2D, 0x92,", "0x00, 0x00, 0x00, 0xFC, 0x30, 0x18, 0x7E, 0x00, 0x00, 0x00,", "8); for i in range(12): myLCD.setCursor(i, 0) myLCD.setGrayLevel(i) myLCD.write('Hello World')", "0x03, 0xE0, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1E,", "Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT", "0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xF0, 0xE0, 0x0F, 0x0F,", "0x00, 0x01, 0xF8, 0xF0, 0x0E, 0x1F, 0x80, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0E,", "Software is furnished to do so, subject to # the", "0x00, 0x7F, 0x18, 0x30, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0xF8, 0x3E, 0x00, 0x00, 0x00,", "0xE7, 0x39, 0xCE, 0x1C, 0xDF, 0xE1, 0xB9, 0xEC, 0xE7, 0xE0,", "0x01, 0xF8, 0x70, 0x1C, 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x27, 0x80, 0x87, 0x9E, 0x40, 0x8D, 0xE7, 0x9E, 0x48, 0x00,", "IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, #", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xC0, 0x07,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xC0,", "0x00, 0x01, 0xF8, 0x70, 0x1C, 0x3F, 0x00, 0x00, 0x00, 0x00,", "0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0xFE, 0x1F, 0xE6, 0x3F, 0x9F, 0xEC, 0xFE, 0x7E, 0x3F, 0x0F,", "0x30, 0x0F, 0x00, 0x01, 0xE0, 0x18, 0x00, 0x00, 0x00, 0x00,", "0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x00,", "0x00, 0x00, 0x7F, 0x18, 0x30, 0xFC, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1F, 0x07, 0x80, 0x03,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF8,", "0x88, 0x21, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER", "person obtaining # a copy of this software and associated", "0x21, 0xB2, 0x48, 0x40, 0x62, 0xF9, 0x2C, 0x80, 0x8C, 0xD2,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xC4, 0x47, 0xE0, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00] SeeedLogo = upmLCD.uint8Array(len(logoArr)) for", "# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER", "0x9F, 0xEC, 0xFE, 0x7E, 0x3F, 0x0F, 0xC3, 0xF0, 0xFA, 0x0F,", "IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING", "pyupm_i2clcd as upmLCD myLCD = upmLCD.SSD1327(0, 0x3C); logoArr = [0x00,", "0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0x87, 0xC0,", "0x0F, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xF0,", "0xFE, 0x7E, 0x3F, 0x0F, 0xC3, 0xF0, 0xFA, 0x0F, 0xC6, 0x3F,", "# included in all copies or substantial portions of the", "0x00, 0x00, 0x07, 0xE1, 0xE0, 0x07, 0x0F, 0xC0, 0x00, 0x00,", "module import time, signal, sys import pyupm_i2clcd as upmLCD myLCD", "0x40, 0x8B, 0xE7, 0xB0, 0x48, 0x40, 0xE2, 0xC9, 0x2C, 0x80,", "NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR", "0x00, 0x00, 0x00, 0x00, 0xC0, 0x06, 0x00, 0x00, 0x00, 0x00,", "0x6D, 0x83, 0x3E, 0x7F, 0xFF, 0xFF, 0xFF, 0x06, 0x07, 0xC6,", "0xB2, 0xF9, 0xE7, 0x80, 0x00, 0x00, 0x00, 0x00, 0x46, 0x9A,", "0x92, 0x70, 0x89, 0xE7, 0x9E, 0x78, 0x7C, 0xE2, 0xC9, 0x2C,", "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY", "0x00, 0x00, 0x00, 0x7C, 0x00, 0x00, 0x20, 0x82, 0x00, 0x00,", "0x70, 0x1C, 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,", "substantial portions of the Software. # # THE SOFTWARE IS", "0x00, 0x00, 0x00, 0x3E, 0x0F, 0x80, 0x03, 0xE0, 0x78, 0x00,", "0x00, 0x00, 0x00, 0x1F, 0x07, 0x80, 0x03, 0xC1, 0xF0, 0x00,", "0xE7, 0x9E, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "96 * 96 / 8); for i in range(12): myLCD.setCursor(i,", "0x1C, 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "to any person obtaining # a copy of this software", "0x9E, 0x78, 0x7C, 0xE2, 0xC9, 0x2C, 0x81, 0xCC, 0xD2, 0x40,", "0x0F, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xF8,", "0xE6, 0x3F, 0x9F, 0xEC, 0xFE, 0x7E, 0x3F, 0x0F, 0xC3, 0xF0,", "0x0F, 0x80, 0x01, 0xE0, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00,", "the Software, and to # permit persons to whom the", "0x00, 0x1F, 0x07, 0x80, 0x03, 0xC1, 0xF0, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x07, 0x80, 0x03, 0xC0, 0x00, 0x00, 0x00,", "0x01, 0xC0, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20,", "0x00, 0x00, 0x7D, 0x9E, 0x68, 0x20, 0xB2, 0xC8, 0x64, 0x00,", "0x00, 0x00, 0x3C, 0x0F, 0x80, 0x01, 0xE0, 0x78, 0x00, 0x00,", "0x30, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x1E, 0x07, 0x80, 0x03, 0xE0, 0xF8, 0x00, 0x00, 0x00,", "\"AS IS\", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR", "0x00, 0x00, 0x00, 0x47, 0x9E, 0x6F, 0x20, 0xB2, 0xF9, 0xE7,", "of the Software, and to # permit persons to whom", "SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,", "0x03, 0xE0, 0x0F, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x0F, 0x80,", "0x00, 0x07, 0x80, 0x03, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x60, 0x00, 0x7E, 0x3F, 0x0F, 0xC3, 0xF0, 0xFA, 0x0F, 0xDF,", "0x00, 0x00, 0x00, 0x02, 0x00, 0x06, 0x00, 0x00, 0x60, 0x00,", "0xCF, 0x70, 0x9E, 0x79, 0xE7, 0x80, 0x00, 0x00, 0x00, 0x00,", "EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE", "0x00, 0x00, 0x00, 0x00, 0xF8, 0x3E, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] SeeedLogo = upmLCD.uint8Array(len(logoArr))", "0x00, 0x00, 0x00, 0x00, 0x38, 0x0F, 0x00, 0x01, 0xE0, 0x18,", "0x00, 0x00, 0x7C, 0xF3, 0xCF, 0x30, 0x9E, 0x79, 0xE7, 0x90,", "96 / 8); for i in range(12): myLCD.setCursor(i, 0) myLCD.setGrayLevel(i)", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x80, 0x01,", "0xC6, 0x61, 0xB0, 0x6D, 0x83, 0xC3, 0x61, 0x18, 0x46, 0x03,", "0x00, 0x00, 0x00, 0x00, 0x6C, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x20, 0x82, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x1F, 0x07, 0x80, 0x03, 0xC1,", "0x80, 0x03, 0xE0, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7F, 0x18,", "SeeedLogo = upmLCD.uint8Array(len(logoArr)) for x in range(len(logoArr)): SeeedLogo.__setitem__(x, logoArr[x]) #", "OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION #", "rights to use, copy, modify, merge, publish, # distribute, sublicense,", "0x00, 0x00, 0x00, 0x38, 0x0F, 0x00, 0x01, 0xE0, 0x18, 0x00,", "0x00, 0x00, 0x03, 0xF0, 0xE0, 0x0F, 0x0F, 0x80, 0x00, 0x00,", "/ 8); for i in range(12): myLCD.setCursor(i, 0) myLCD.setGrayLevel(i) myLCD.write('Hello", "0x00, 0x00, 0x46, 0x9A, 0x61, 0x20, 0xB2, 0xCB, 0x60, 0x80,", "copyright notice and this permission notice shall be # included", "0x80, 0x84, 0xD2, 0x40, 0x8B, 0x2D, 0x92, 0x48, 0x7D, 0xB3,", "0x00, 0x00, 0x40, 0x60, 0xB7, 0x79, 0xE7, 0x81, 0xC7, 0x92,", "0x7E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "and/or sell copies of the Software, and to # permit", "0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x0F, 0x00, 0x01,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x30,", "0x00, 0x44, 0x00, 0x00, 0x20, 0x82, 0x00, 0x00, 0x00, 0x00,", "display module import time, signal, sys import pyupm_i2clcd as upmLCD", "0x00, 0x00, 0x00, 0x00, 0x01, 0xC0, 0x07, 0x00, 0x00, 0x00,", "to # permit persons to whom the Software is furnished", "0xFA, 0x0F, 0xDF, 0xE1, 0x9F, 0xEC, 0x7E, 0xE6, 0x73, 0x9C,", "0x01, 0xE0, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30,", "OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES", "0xCF, 0x30, 0x9E, 0x79, 0xE7, 0x90, 0x00, 0x00, 0x00, 0x00,", "WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE,", "0x00, 0x30, 0x0F, 0x00, 0x01, 0xE0, 0x18, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0x83, 0xC0, 0x07, 0x83,", "0x2C, 0x81, 0xCC, 0xD2, 0x40, 0xFB, 0x21, 0xB2, 0x48, 0x40,", "0x86, 0x18, 0x66, 0x61, 0xB0, 0x6D, 0xC3, 0xFE, 0x7F, 0x9F,", "0x30, 0x0F, 0x00, 0x01, 0xE0, 0x08, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x03, 0xE0, 0x0F, 0x80, 0x00, 0x00, 0x00,", "0x1C, 0xDF, 0xE1, 0xB9, 0xEC, 0xE7, 0xE0, 0x61, 0xD8, 0x66,", "0x00, 0x60, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6C, 0xF3, 0xCF, 0x70,", "this software and associated documentation files (the # \"Software\"), to", "0x00, 0x00, 0x38, 0x0F, 0x00, 0x01, 0xE0, 0x18, 0x00, 0x00,", "0x7C, 0x02, 0x00, 0x00, 0x82, 0x60, 0x00, 0x00, 0xF8, 0x00,", "0xB7, 0x79, 0xE7, 0x81, 0xC7, 0x92, 0x70, 0x89, 0xE7, 0x9E,", "0x61, 0xB0, 0x6D, 0x83, 0x3E, 0x7F, 0xFF, 0xFF, 0xFF, 0x06,", "0x44, 0x00, 0x00, 0x20, 0x82, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x40, 0x8D, 0xE7, 0x9E, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x06, 0x0F, 0x86, 0x61, 0xB0, 0x6D, 0x83, 0x3E, 0x7F, 0xFF,", "0x40, 0x02, 0x00, 0x00, 0x83, 0x60, 0x00, 0x00, 0x8C, 0x00,", "of the Software. # # THE SOFTWARE IS PROVIDED \"AS", "0x00, 0x00, 0x00, 0x00, 0x07, 0x80, 0x01, 0xC0, 0x08, 0x00,", "is furnished to do so, subject to # the following", "0x20, 0xB2, 0xC8, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47,", "0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x60, 0x04, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x07, 0x80, 0x01, 0xE0,", "0xD2, 0x40, 0x8B, 0x2D, 0x92, 0x48, 0x7D, 0xB3, 0x79, 0x27,", "2015 Intel Corporation. 
# # Permission is hereby granted, free", "0x00, 0x03, 0xF0, 0xE0, 0x0F, 0x0F, 0x80, 0x00, 0x00, 0x00,", "whom the Software is furnished to do so, subject to", "0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x6C, 0x00,", "0x7D, 0xB3, 0x79, 0x27, 0x80, 0x87, 0x9E, 0x40, 0x8D, 0xE7,", "0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xF8, 0x70,", "0x03, 0xF0, 0xE0, 0x0F, 0x0F, 0x80, 0x00, 0x00, 0x00, 0x00,", "0x3E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0xC7, 0x92, 0x70, 0x89, 0xE7, 0x9E, 0x78, 0x7C, 0xE2, 0xC9,", "# permit persons to whom the Software is furnished to", "0x68, 0x20, 0xB2, 0xC8, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x80, 0x03, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x47, 0x9E, 0x6F, 0x20, 0xB2, 0xF9, 0xE7, 0x80, 0x00, 0x00,", "0x20, 0xB2, 0xF9, 0xE7, 0x80, 0x00, 0x00, 0x00, 0x00, 0x46,", "0x1F, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xF8,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00,", "0x00, 0x40, 0x40, 0x02, 0x00, 0x00, 0x83, 0x60, 0x00, 0x00,", "LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN", "0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F,", "0x60, 0x80, 0x00, 0x00, 0x00, 0x00, 0x7C, 0xF3, 0xCF, 0x30,", "a copy of this software and associated documentation files (the", "0x20, 0x0F, 0x80, 0x01, 0xE0, 0x08, 0x00, 0x00, 0x00, 0x00,", "# the following conditions: # # The above copyright notice", "files (the # \"Software\"), to deal in the Software without", "0xC4, 0x47, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "documentation files (the # \"Software\"), to deal in the Software", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x0F, 0x00, 0x01,", "0x02, 0x00, 0x06, 0x00, 0x00, 0x6C, 0x00, 0x00, 0x00, 0x00,", "0x06, 0x07, 0xC6, 0x61, 0xB0, 0x6D, 0x83, 0xC3, 0x61, 0x18,", "0x00, 0x60, 0x00, 0x7E, 0x3F, 0x0F, 0xC3, 0xF0, 0xFA, 0x0F,", "0xB2, 0xC8, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x9E,", "0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00,", "0xFA, 0x0F, 0xC6, 0x3F, 0x9F, 0xEC, 0x7E, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7C,", "0x80, 0x03, 0xE0, 0x78, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "and this permission notice shall be # included in all", "0xE0, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x0F,", "0x2C, 0x80, 0x8C, 0xD2, 0x40, 0x8B, 0xE7, 0xB0, 0x48, 0x40,", "0x00, 0x00, 0x8C, 0x00, 0x00, 0x40, 0x60, 0xB7, 0x79, 0xE7,", "0x01, 0xE0, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20,", "COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR", "OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT", "0x00, 0x30, 0x0F, 0x00, 0x01, 0xE0, 0x08, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0E, 0xE0, 0x00, 0x00,", "and to # permit persons to whom the Software is", "BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS", "0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3C, 0x0F, 0x80,", "0x00, 0x00, 0x00, 0x00, 0x1F, 0x88, 0x21, 0xF0, 0x00, 0x00,", "0x0F, 0xC4, 0x47, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x06, 0x00, 0x00, 0x60, 0x00, 0x7E, 0x3F, 0x0F, 0xC3,", "0x00, 0x01, 0xE0, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0xE2, 0xC9, 0x2C, 0x81, 0xCC, 0xD2, 0x40, 0xFB, 0x21, 0xB2,", "0xF0, 0xFA, 0x0F, 0xDF, 0xE1, 0x9F, 0xEC, 0x7E, 0xE6, 0x73,", "0x80, 0x03, 0xC1, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "[0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x08, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x20, 0x0F, 0x80, 0x01, 0xE0, 0x08, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7C, 0x02, 0x00,", "0xE7, 
0x80, 0x00, 0x00, 0x00, 0x00, 0x7D, 0x9E, 0x68, 0x20,", "0x07, 0x80, 0x01, 0xE0, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00,", "0xE0, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1F, 0x07,", "0xFF, 0xFF, 0xFF, 0x06, 0x07, 0xC6, 0x61, 0xB0, 0x6D, 0x83,", "0x07, 0x80, 0x03, 0xE0, 0xF8, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x0F, 0x00,", "0x40, 0x40, 0x02, 0x00, 0x00, 0x83, 0x60, 0x00, 0x00, 0x8C,", "white, the seeed logo will appear jagged myLCD.setGrayLevel(12) myLCD.draw(SeeedLogo, 96", "<NAME> <<EMAIL>> # Copyright (c) 2015 Intel Corporation. # #", "0x00, 0x0E, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x3E, 0x0F, 0x80, 0x03, 0xE0, 0x78, 0x00, 0x00, 0x00, 0x00,", "0x40, 0x62, 0xF9, 0x2C, 0x80, 0x8C, 0xD2, 0x40, 0x8B, 0xE7,", "0xC0, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x07,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80,", "0x00, 0x40, 0x60, 0xB7, 0x79, 0xE7, 0x81, 0xC7, 0x92, 0x70,", "0x00, 0x00, 0x00, 0x7C, 0xF3, 0xCF, 0x30, 0x9E, 0x79, 0xE7,", "0x6C, 0xF3, 0xCF, 0x70, 0x9E, 0x79, 0xE7, 0x80, 0x00, 0x00,", "0x9F, 0xE7, 0xF9, 0xFE, 0x1F, 0xE6, 0x3F, 0x9F, 0xEC, 0xFE,", "0x80, 0x00, 0x00, 0x00, 0x00, 0x7C, 0xF3, 0xCF, 0x30, 0x9E,", "0x86, 0x61, 0xB0, 0x6D, 0x83, 0x3E, 0x7F, 0xFF, 0xFF, 0xFF,", "0x00, 0x00, 0x00, 0x00, 0x38, 0x0F, 0x00, 0x01, 0xE0, 0x38,", "CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION #", "CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN", "OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #", "0x3C, 0x0F, 0x80, 0x01, 0xE0, 0x78, 0x00, 0x00, 0x00, 0x00,", "0x78, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3E, 0x0F, 0x80,", "0x00, 0x02, 0x00, 0x06, 0x00, 0x00, 0x6C, 0x00, 0x00, 0x00,", "0xB2, 0x48, 0x40, 0x62, 0xF9, 0x2C, 0x80, 0x8C, 0xD2, 0x40,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xC4,", "0x38, 0x0F, 0x00, 0x01, 0xE0, 0x18, 0x00, 0x00, 0x00, 0x00,", "AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR", "0x07, 0x87, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07,", "0xB9, 0xEC, 0xE7, 0xE0, 0x61, 0xD8, 0x66, 0x1B, 0x86, 0x1C,", "0x80, 0x87, 0x9E, 0x40, 0x8D, 0xE7, 0x9E, 0x48, 0x00, 0x00,", "myLCD.draw(SeeedLogo, 96 * 96 / 8); for i in range(12):", "0x00, 0x00, 0x00, 0x01, 0xC0, 0x07, 0x00, 0x00, 0x00, 0x00,", "seeed logo will appear jagged myLCD.setGrayLevel(12) myLCD.draw(SeeedLogo, 96 * 96", "# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE", "0x2C, 0x80, 0x84, 0xD2, 0x40, 0x8B, 0x2D, 0x92, 0x48, 0x7D,", "0x18, 0x66, 0x61, 0xB0, 0x6D, 0xC3, 0xFE, 0x7F, 0x9F, 0xE7,", "0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x7F, 0x9F, 0xE7, 0xF9, 0xFE, 0x1F, 0xE6, 0x3F, 0x9F, 0xEC,", "0x0F, 0x80, 0x03, 0xE0, 0x78, 0x00, 0x00, 0x00, 0x00, 0x00,", "associated documentation files (the # \"Software\"), to deal in the", "0xFF, 0x06, 0x0F, 0x86, 0x61, 0xB0, 0x6D, 0x83, 0x3E, 0x7F,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0x87, 0xC0, 0x07,", "in all copies or substantial portions of the Software. #", "0xFF, 0xFF, 0x06, 0x0F, 0x86, 0x61, 0xB0, 0x6D, 0x83, 0x3E,", "0x06, 0x61, 0xB0, 0x6D, 0xC3, 0x7C, 0x7F, 0xFF, 0xFF, 0xFF,", "copies or substantial portions of the Software. 
# # THE", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x0F, 0x80, 0x01,", "0x83, 0xC0, 0x07, 0x83, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6C, 0xF3, 0xCF,", "0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x0F, 0x00,", "0xE1, 0xB9, 0xEC, 0xE7, 0xE0, 0x61, 0xD8, 0x66, 0x1B, 0x86,", "to deal in the Software without restriction, including # without", "0x00, 0x7C, 0x02, 0x00, 0x00, 0x82, 0x60, 0x00, 0x00, 0xF8,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x6C, 0xF3, 0xCF, 0x70, 0x9E,", "ARISING FROM, OUT OF OR IN CONNECTION # WITH THE", "0x00, 0x00, 0x7C, 0x02, 0x00, 0x00, 0x82, 0x60, 0x00, 0x00,", "0xC0, 0x07, 0x87, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "set the display to be white, the seeed logo will", "signal, sys import pyupm_i2clcd as upmLCD myLCD = upmLCD.SSD1327(0, 0x3C);", "0x00, 0x00, 0x00, 0x7D, 0x9E, 0x68, 0x20, 0xB2, 0xC8, 0x64,", "for x in range(len(logoArr)): SeeedLogo.__setitem__(x, logoArr[x]) # If you don't", "0x0F, 0xC6, 0x3F, 0x9F, 0xEC, 0x7E, 0x00, 0x00, 0x00, 0x00,", "AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3E, 0x0F, 0x80, 0x03,", "0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x06, 0x00, 0x00, 0x6C,", "0x00, 0x7D, 0x9E, 0x68, 0x20, 0xB2, 0xC8, 0x64, 0x00, 0x00,", "FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN", "0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00,", "for i in range(12): myLCD.setCursor(i, 0) myLCD.setGrayLevel(i) myLCD.write('Hello World') print", "time, signal, sys import pyupm_i2clcd as upmLCD myLCD = upmLCD.SSD1327(0,", "0x00, 0x00, 0x1E, 0x07, 0x80, 0x03, 0xE0, 0xF8, 0x00, 0x00,", "FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE", "0xC0, 0x07, 0xC1, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x7C, 0xF3, 0xCF, 0x30, 0x9E, 0x79, 0xE7, 0x90, 0x00,", "0x30, 0x9E, 0x79, 0xE7, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xE1, 0xE0, 0x07, 0x0F,", "0x1B, 0x86, 0x1C, 0x06, 0x61, 0xB0, 0x6D, 0xC3, 0x7C, 0x7F,", "the display to be white, the seeed logo will appear", "0x83, 0x3E, 0x7F, 0xFF, 0xFF, 0xFF, 0x06, 0x07, 0xC6, 0x61,", "0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1E, 0x07, 0x80,", "0x00, 0x00, 0x00, 0x00, 0x0F, 0xC3, 0xC0, 0x07, 0x87, 0xE0,", "0x00, 0x00, 0x00, 0x30, 0x0F, 0x00, 0x01, 0xE0, 0x18, 0x00,", "0xCE, 0x1C, 0xDF, 0xE1, 0xB9, 0xEC, 0xE7, 0xE0, 0x61, 0xD8,", "myLCD.setGrayLevel(12) myLCD.draw(SeeedLogo, 96 * 96 / 8); for i in", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF8, 0x3E, 0x00,", "logoArr[x]) # If you don't set the display to be", "0xC3, 0x7C, 0x7F, 0xFF, 0xFF, 0xFF, 0x06, 0x0F, 0x86, 0x61,", "0xD2, 0x40, 0xFB, 0x21, 0xB2, 0x48, 0x40, 0x62, 0xF9, 0x2C,", "USE OR OTHER DEALINGS IN THE SOFTWARE. 
# Load i2clcd", "you don't set the display to be white, the seeed", "0x00, 0x00, 0x00, 0x00, 0x20, 0x07, 0x80, 0x01, 0xE0, 0x08,", "0xFB, 0x21, 0xB2, 0x48, 0x40, 0x62, 0xF9, 0x2C, 0x80, 0x8C,", "0x8B, 0xE7, 0xB0, 0x48, 0x40, 0xE2, 0xC9, 0x2C, 0x80, 0x84,", "0x00, 0x00, 0x00, 0x00, 0x30, 0x0F, 0x00, 0x01, 0xE0, 0x08,", "0x0F, 0x80, 0x01, 0xE0, 0x78, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x6C, 0x00, 0x00, 0x00,", "restriction, including # without limitation the rights to use, copy,", "0x00, 0x00, 0x6C, 0xF3, 0xCF, 0x70, 0x9E, 0x79, 0xE7, 0x80,", "0x00, 0x8C, 0x00, 0x00, 0x40, 0x60, 0xB7, 0x79, 0xE7, 0x81,", "hereby granted, free of charge, to any person obtaining #", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1F, 0x88, 0x21, 0xF0,", "0x80, 0x01, 0xE0, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x03, 0xC1, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F,", "0x78, 0x7C, 0xE2, 0xC9, 0x2C, 0x81, 0xCC, 0xD2, 0x40, 0xFB,", "logoArr = [0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x08, 0x00,", "the Software without restriction, including # without limitation the rights", "0x00, 0x00, 0x00, 0x07, 0xE1, 0xE0, 0x07, 0x0F, 0xC0, 0x00,", "0x00, 0x00, 0x00, 0x1F, 0x88, 0x21, 0xF0, 0x00, 0x00, 0x00,", "0xB0, 0x6D, 0x83, 0x3E, 0x7F, 0xFF, 0xFF, 0xFF, 0x06, 0x07,", "FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT", "0xF3, 0xCF, 0x70, 0x9E, 0x79, 0xE7, 0x80, 0x00, 0x00, 0x00,", "IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE", "don't set the display to be white, the seeed logo", "0x00, 0xC0, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x30, 0x18, 0x7E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "furnished to do so, subject to # the following conditions:", "the seeed logo will appear jagged myLCD.setGrayLevel(12) myLCD.draw(SeeedLogo, 96 *", "SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.", "0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x0F, 0x80, 0x01,", "0x81, 0xCC, 0xD2, 0x40, 0xFB, 0x21, 0xB2, 0x48, 0x40, 0x62,", "0x00, 0x00, 0x0F, 0x83, 0xC0, 0x07, 0x83, 0xE0, 0x00, 0x00,", "0x00, 0x00, 0x1F, 0x07, 0x80, 0x03, 0xC1, 0xF0, 0x00, 0x00,", "0xB0, 0x48, 0x40, 0xE2, 0xC9, 0x2C, 0x80, 0x84, 0xD2, 0x40,", "PARTICULAR PURPOSE AND # NONINFRINGEMENT. 
IN NO EVENT SHALL THE", "OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR", "0x00, 0x00, 0x01, 0xC0, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00,", "any person obtaining # a copy of this software and", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7C, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60,", "0x01, 0xE0, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3C,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF8, 0x3E, 0x00, 0x00,", "0x61, 0x18, 0x46, 0x03, 0x86, 0x18, 0x66, 0x61, 0xB0, 0x6D,", "0x60, 0x00, 0x00, 0x8C, 0x00, 0x00, 0x40, 0x60, 0xB7, 0x79,", "0xC0, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x7C, 0x00, 0x00, 0x20, 0x82, 0x00, 0x00, 0x00, 0x00,", "0x60, 0xB7, 0x79, 0xE7, 0x81, 0xC7, 0x92, 0x70, 0x89, 0xE7,", "= upmLCD.uint8Array(len(logoArr)) for x in range(len(logoArr)): SeeedLogo.__setitem__(x, logoArr[x]) # If", "0x00, 0x00, 0x00, 0x00, 0x3E, 0x0F, 0x80, 0x03, 0xE0, 0x78,", "0x00, 0x0F, 0x87, 0xC0, 0x07, 0xC1, 0xF0, 0x00, 0x00, 0x00,", "0x07, 0x80, 0x03, 0xC1, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00,", "OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH", "0x00, 0x00, 0x00, 0x1E, 0x07, 0x80, 0x03, 0xE0, 0xF8, 0x00,", "0x40, 0x60, 0xB7, 0x79, 0xE7, 0x81, 0xC7, 0x92, 0x70, 0x89,", "0x00, 0x00, 0x60, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "granted, free of charge, to any person obtaining # a", "upmLCD myLCD = upmLCD.SSD1327(0, 0x3C); logoArr = [0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x7F, 0x18, 0x30, 0xFC, 0x00, 0x00, 0x00,", "0x00, 0xF8, 0x3E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x6C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,", "0x00, 0x06, 0x00, 0x00, 0x6C, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x01, 0xE0, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30,", "0x00, 0x00, 0x00, 0x02, 0x00, 0x06, 0x00, 0x00, 0x6C, 0x00,", "0x00, 0x00, 0x44, 0x00, 0x00, 0x20, 0x82, 0x00, 0x00, 0x00,", "TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION", "\"Software\"), to deal in the Software without restriction, including #", "software and associated documentation files (the # \"Software\"), to deal", "0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x0F, 0x00, 0x01, 0xE0,", "0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x30, 0x18, 0x7E, 0x00,", "0x20, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,", "0x00, 0x00, 0x47, 0x9E, 0x6F, 0x20, 0xB2, 0xF9, 0xE7, 0x80,", "0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1F,", "0x9E, 0x79, 0xE7, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0xE7, 0xB0, 0x48, 0x40, 0xE2, 0xC9, 0x2C, 0x80, 0x84, 0xD2,", "0x00, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00, 0x20, 0x82, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xF8, 0x70, 0x1C,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x9E, 0x6F, 0x20, 0xB2, 0xF9, 0xE7, 0x80, 0x00, 0x00, 0x00,", "DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF", "0x00, 0x00] SeeedLogo = upmLCD.uint8Array(len(logoArr)) for x in range(len(logoArr)): SeeedLogo.__setitem__(x,", "x in range(len(logoArr)): SeeedLogo.__setitem__(x, logoArr[x]) # If you don't set", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xF0, 0xE0, 0x0F,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xC4, 0x47, 0xE0,", "OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR", "BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,", "0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xF8, 0xF0,", "0x00, 0x00, 0x00, 0x00, 
0x00, 0x01, 0xF8, 0xF0, 0x0E, 0x1F,", "0x48, 0x40, 0x62, 0xF9, 0x2C, 0x80, 0x8C, 0xD2, 0x40, 0x8B,", "deal in the Software without restriction, including # without limitation", "0x00, 0x00, 0xF8, 0x00, 0x00, 0x40, 0x40, 0x02, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x60, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xF8, 0xF0, 0x0E,", "0x3F, 0x0F, 0xC3, 0xF0, 0xFA, 0x0F, 0xDF, 0xE1, 0x9F, 0xEC,", "OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0x87, 0xC0, 0x07, 0xC1,", "0x9E, 0x68, 0x20, 0xB2, 0xC8, 0x64, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x20, 0x08, 0x00, 0x00, 0x00, 0x00,", "0x7C, 0x00, 0x00, 0x20, 0x82, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x6C, 0xF3, 0xCF, 0x70, 0x9E, 0x79,", "0xFF, 0x06, 0x07, 0xC6, 0x61, 0xB0, 0x6D, 0x83, 0xC3, 0x61,", "0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x06, 0x00, 0x00, 0x60,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x07, 0x80, 0x01,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x00] SeeedLogo = upmLCD.uint8Array(len(logoArr)) for x", "0x00, 0x00, 0x00, 0x00, 0x00, 0x0E, 0xE0, 0x00, 0x00, 0x00,", "0xC8, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x9E, 0x6F,", "0xEC, 0x7E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x03, 0xF0, 0xE0, 0x0F, 0x0F, 0x80, 0x00,", "Permission is hereby granted, free of charge, to any person", "0xC1, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0x87,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x80, 0x01, 0xC0,", "HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", "0xE7, 0xE0, 0x61, 0xD8, 0x66, 0x1B, 0x86, 0x1C, 0x06, 0x61,", "0x61, 0xB0, 0x6D, 0xC3, 0xFE, 0x7F, 0x9F, 0xE7, 0xF9, 0xFE,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xE0,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00] SeeedLogo =", "ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED", "0x07, 0xC1, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F,", "0x7F, 0xFF, 0xFF, 0xFF, 0x06, 0x07, 0xC6, 0x61, 0xB0, 0x6D,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7C, 0x00, 0x00,", "0x00, 0x00, 0x01, 0xF8, 0xF0, 0x0E, 0x1F, 0x80, 0x00, 0x00,", "The above copyright notice and this permission notice shall be", "0x21, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,", "logo will appear jagged myLCD.setGrayLevel(12) myLCD.draw(SeeedLogo, 96 * 96 /", "0x00, 0x00, 0x40, 0x40, 0x02, 0x00, 0x00, 0x83, 0x60, 0x00,", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY,", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0x83, 0xC0, 0x07,", "0x00, 0x00, 0x00, 0x00, 0x7F, 0x18, 0x30, 0xFC, 0x00, 0x00,", "0x83, 0xC3, 0x61, 0x18, 0x46, 0x03, 0x86, 0x18, 0x66, 0x61,", "0x00, 0x00, 0x00, 0x00, 0x7D, 0x9E, 0x68, 0x20, 0xB2, 0xC8,", "0xC3, 0xC0, 0x07, 0x87, 0xE0, 0x00, 0x00, 0x00, 0x00, 0x00,", "0xB0, 0x6D, 0xC3, 0x7C, 0x7F, 0xFF, 0xFF, 0xFF, 0x06, 0x0F,", "0x0F, 0xC3, 0xF0, 0xFA, 0x0F, 0xC6, 0x3F, 0x9F, 0xEC, 0x7E,", "i2clcd display module import time, signal, sys import pyupm_i2clcd as", "0xF3, 0xCF, 0x30, 0x9E, 0x79, 0xE7, 0x90, 0x00, 0x00, 0x00,", "0x1F, 0xE6, 0x3F, 0x9F, 0xEC, 0xFE, 0x7E, 0x3F, 0x0F, 0xC3,", "0x40, 0xFB, 0x21, 0xB2, 0x48, 0x40, 0x62, 0xF9, 0x2C, 0x80,", "0x03, 0xE0, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1F,", "0xC3, 0xFE, 0x7F, 0x9F, 0xE7, 0xF9, 0xFE, 0x1F, 0xE6, 0x3F,", "# distribute, sublicense, and/or sell copies of the Software, and", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x04, 0x00, 0x00,", "0x00, 0x00, 0x00, 0x00, 0x07, 0x80, 0x03, 0xC0, 
0x00, 0x00,", "0x1F, 0x07, 0x80, 0x03, 0xC1, 0xF0, 0x00, 0x00, 0x00, 0x00,", "ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION", "<<EMAIL>> # Copyright (c) 2015 Intel Corporation. # # Permission", "0x00, 0x00, 0x00, 0x00, 0x7C, 0xF3, 0xCF, 0x30, 0x9E, 0x79,", "0x00, 0x00, 0x00, 0xC0, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00,", "0x0F, 0xDF, 0xE1, 0x9F, 0xEC, 0x7E, 0xE6, 0x73, 0x9C, 0xE7,", "IN CONNECTION # WITH THE SOFTWARE OR THE USE OR", "0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0E, 0xE0, 0x00,", "0x70, 0x9E, 0x79, 0xE7, 0x80, 0x00, 0x00, 0x00, 0x00, 0x7D," ]
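# The draw() call above hands the controller one bit per pixel, so a 96x96
# panel takes exactly 96 * 96 / 8 = 1152 bytes. A minimal standalone sketch
# of that packing (assumption: row-major, MSB-first bit layout;
# pack_monochrome is a hypothetical helper, not part of pyupm_i2clcd):

def pack_monochrome(pixels, width, height):
    """Pack a row-major list of 0/1 pixels into bytes, MSB first."""
    assert len(pixels) == width * height
    out = []
    for i in range(0, len(pixels), 8):
        byte = 0
        for bit, p in enumerate(pixels[i:i + 8]):
            byte |= (p & 1) << (7 - bit)  # leftmost pixel lands in the MSB
        out.append(byte)
    return out

# An all-white 96x96 test pattern packs to 1152 bytes of 0xFF.
buf = pack_monochrome([1] * (96 * 96), 96, 96)
assert len(buf) == 96 * 96 // 8 == 1152
assert all(b == 0xFF for b in buf)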
[ ".otsu import otsu_thresholding_method from .p_tile import p_tile_thresholding_method from .pun import", ".p_tile import p_tile_thresholding_method from .pun import pun_thresholding_method from .rosin import", ".gaussian import threshold_value_gaussian from .johannsen import johannsen_thresholding_method from .kapur import", "import threshold_value_gaussian from .johannsen import johannsen_thresholding_method from .kapur import kapur_thresholding_method", "from .bradley_roth import bradley_thresholding_method from .contrast import contrast_thresholding_method from .feng", "otsu_thresholding_method from .p_tile import p_tile_thresholding_method from .pun import pun_thresholding_method from", "from .gaussian import threshold_value_gaussian from .johannsen import johannsen_thresholding_method from .kapur", "p_tile_thresholding_method from .pun import pun_thresholding_method from .rosin import rosin_thresholding_method from", ".johannsen import johannsen_thresholding_method from .kapur import kapur_thresholding_method from .mean import", ".nick import nick_thresholding_method from .otsu import otsu_thresholding_method from .p_tile import", "nick_thresholding_method from .otsu import otsu_thresholding_method from .p_tile import p_tile_thresholding_method from", ".minimum_error import minimum_err_thresholding_method from .niblack import niblack_thresholding_method from .nick import", "johannsen_thresholding_method from .kapur import kapur_thresholding_method from .mean import threshold_value_mean from", ".mean import threshold_value_mean from .minimum_error import minimum_err_thresholding_method from .niblack import", "minimum_err_thresholding_method from .niblack import niblack_thresholding_method from .nick import nick_thresholding_method from", "import niblack_thresholding_method from .nick import nick_thresholding_method from .otsu import otsu_thresholding_method", "import singh_thresholding_method from .two_peaks import two_peaks_thresholding_method from .wolf import wolf_thresholding_method", "from .contrast import contrast_thresholding_method from .feng import feng_thresholding_method from .gaussian", "from .feng import feng_thresholding_method from .gaussian import threshold_value_gaussian from .johannsen", "from .pun import pun_thresholding_method from .rosin import rosin_thresholding_method from .sauvola", "threshold_value_gaussian from .johannsen import johannsen_thresholding_method from .kapur import kapur_thresholding_method from", "import rosin_thresholding_method from .sauvola import sauvola_thresholding_method from .singh import singh_thresholding_method", "bernsen_thresholding_method from .bradley_roth import bradley_thresholding_method from .contrast import contrast_thresholding_method from", "feng_thresholding_method from .gaussian import threshold_value_gaussian from .johannsen import johannsen_thresholding_method from", "from .minimum_error import minimum_err_thresholding_method from .niblack import niblack_thresholding_method from .nick", ".niblack import niblack_thresholding_method from .nick import nick_thresholding_method from .otsu import", "from .kapur import kapur_thresholding_method from .mean import threshold_value_mean from .minimum_error", "from .otsu import otsu_thresholding_method from .p_tile import p_tile_thresholding_method from .pun", "from .johannsen import johannsen_thresholding_method from .kapur import kapur_thresholding_method from .mean", "import bradley_thresholding_method from .contrast import contrast_thresholding_method from .feng import 
feng_thresholding_method", "sauvola_thresholding_method from .singh import singh_thresholding_method from .two_peaks import two_peaks_thresholding_method from", "threshold_value_mean from .minimum_error import minimum_err_thresholding_method from .niblack import niblack_thresholding_method from", ".sauvola import sauvola_thresholding_method from .singh import singh_thresholding_method from .two_peaks import", "import contrast_thresholding_method from .feng import feng_thresholding_method from .gaussian import threshold_value_gaussian", "from .rosin import rosin_thresholding_method from .sauvola import sauvola_thresholding_method from .singh", ".rosin import rosin_thresholding_method from .sauvola import sauvola_thresholding_method from .singh import", "import feng_thresholding_method from .gaussian import threshold_value_gaussian from .johannsen import johannsen_thresholding_method", "import johannsen_thresholding_method from .kapur import kapur_thresholding_method from .mean import threshold_value_mean", ".bradley_roth import bradley_thresholding_method from .contrast import contrast_thresholding_method from .feng import", "from .bernsen import bernsen_thresholding_method from .bradley_roth import bradley_thresholding_method from .contrast", "from .nick import nick_thresholding_method from .otsu import otsu_thresholding_method from .p_tile", "import sauvola_thresholding_method from .singh import singh_thresholding_method from .two_peaks import two_peaks_thresholding_method", ".singh import singh_thresholding_method from .two_peaks import two_peaks_thresholding_method from .wolf import", "import threshold_value_mean from .minimum_error import minimum_err_thresholding_method from .niblack import niblack_thresholding_method", ".bernsen import bernsen_thresholding_method from .bradley_roth import bradley_thresholding_method from .contrast import", ".feng import feng_thresholding_method from .gaussian import threshold_value_gaussian from .johannsen import", "bradley_thresholding_method from .contrast import contrast_thresholding_method from .feng import feng_thresholding_method from", "from .niblack import niblack_thresholding_method from .nick import nick_thresholding_method from .otsu", "from .sauvola import sauvola_thresholding_method from .singh import singh_thresholding_method from .two_peaks", ".kapur import kapur_thresholding_method from .mean import threshold_value_mean from .minimum_error import", "contrast_thresholding_method from .feng import feng_thresholding_method from .gaussian import threshold_value_gaussian from", "import otsu_thresholding_method from .p_tile import p_tile_thresholding_method from .pun import pun_thresholding_method", "niblack_thresholding_method from .nick import nick_thresholding_method from .otsu import otsu_thresholding_method from", "import bernsen_thresholding_method from .bradley_roth import bradley_thresholding_method from .contrast import contrast_thresholding_method", "import pun_thresholding_method from .rosin import rosin_thresholding_method from .sauvola import sauvola_thresholding_method", "import nick_thresholding_method from .otsu import otsu_thresholding_method from .p_tile import p_tile_thresholding_method", "pun_thresholding_method from .rosin import rosin_thresholding_method from .sauvola import sauvola_thresholding_method from", "import minimum_err_thresholding_method from .niblack import niblack_thresholding_method from .nick import nick_thresholding_method", "from .mean import threshold_value_mean from .minimum_error import 
minimum_err_thresholding_method from .niblack", "import p_tile_thresholding_method from .pun import pun_thresholding_method from .rosin import rosin_thresholding_method", "import kapur_thresholding_method from .mean import threshold_value_mean from .minimum_error import minimum_err_thresholding_method", ".pun import pun_thresholding_method from .rosin import rosin_thresholding_method from .sauvola import", "rosin_thresholding_method from .sauvola import sauvola_thresholding_method from .singh import singh_thresholding_method from", "from .p_tile import p_tile_thresholding_method from .pun import pun_thresholding_method from .rosin", "kapur_thresholding_method from .mean import threshold_value_mean from .minimum_error import minimum_err_thresholding_method from", "from .singh import singh_thresholding_method from .two_peaks import two_peaks_thresholding_method from .wolf", ".contrast import contrast_thresholding_method from .feng import feng_thresholding_method from .gaussian import" ]
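# A possible consumer-side convenience (an assumption, not part of the
# original __init__.py): expose the imported methods under short names so
# callers can dispatch by string, e.g. THRESHOLDING_METHODS["otsu"](image).
THRESHOLDING_METHODS = {
    "bernsen": bernsen_thresholding_method,
    "bradley_roth": bradley_thresholding_method,
    "contrast": contrast_thresholding_method,
    "feng": feng_thresholding_method,
    "gaussian": threshold_value_gaussian,
    "johannsen": johannsen_thresholding_method,
    "kapur": kapur_thresholding_method,
    "mean": threshold_value_mean,
    "minimum_error": minimum_err_thresholding_method,
    "niblack": niblack_thresholding_method,
    "nick": nick_thresholding_method,
    "otsu": otsu_thresholding_method,
    "p_tile": p_tile_thresholding_method,
    "pun": pun_thresholding_method,
    "rosin": rosin_thresholding_method,
    "sauvola": sauvola_thresholding_method,
    "singh": singh_thresholding_method,
    "two_peaks": two_peaks_thresholding_method,
    "wolf": wolf_thresholding_method,
}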
[ "os.getenv('CELERY_RESULT_BACKEND', 'redis') CELERY_RESULT_BACKEND = os.getenv('CELERY_RESULT_BACKEND', 'amqp') ## Using the database", "\"amqp://guest:guest@localhost:5672/\" #BROKER_URL = os.getenv('BROKER_URL', 'redis://guest@localhost:6379') #BROKER_HOST = \"localhost\" #BROKER_PORT =", "= os.getenv('CELERY_RESULT_BACKEND', 'amqp') ## Using the database to store task", "( Queue('default', exchange=Exchange('default'), routing_key='default'), # Queue('aws_uploads', routing_key='video.uploads'), ) CELERY_DEFAULT_EXCHANGE =", "\"mongodb\" #CELERY_MONGODB_BACKEND_SETTINGS = { # \"host\": \"localhost\", # \"port\": 27017,", "os from kombu import Queue, Exchange ## Broker settings. BROKER_URL", "CELERY_RESULT_BACKEND = os.getenv('CELERY_RESULT_BACKEND', 'amqp') ## Using the database to store", "CELERY_DEFAULT_EXCHANGE_TYPE = 'direct' CELERY_DEFAULT_ROUTING_KEY = 'default' CELERY_IMPORTS = ('celeryservice.tasks',) #CELERY_RESULT_BACKEND", "= 'default' CELERY_IMPORTS = ('celeryservice.tasks',) #CELERY_RESULT_BACKEND = os.getenv('CELERY_RESULT_BACKEND', 'redis') CELERY_RESULT_BACKEND", "'amqp') ## Using the database to store task state and", "import Queue, Exchange ## Broker settings. BROKER_URL = os.getenv('BROKER_URL', 'amqp://guest:guest@localhost:5672')", "'celery' CELERY_DEFAULT_QUEUE = 'default' CELERY_QUEUES = ( Queue('default', exchange=Exchange('default'), routing_key='default'),", "routing_key='video.uploads'), ) CELERY_DEFAULT_EXCHANGE = 'default' CELERY_DEFAULT_EXCHANGE_TYPE = 'direct' CELERY_DEFAULT_ROUTING_KEY =", "\"localhost\" #BROKER_PORT = 27017 #BROKER_TRANSPORT = 'mongodb' #BROKER_VHOST = 'celery'", "#BROKER_TRANSPORT = 'mongodb' #BROKER_VHOST = 'celery' CELERY_DEFAULT_QUEUE = 'default' CELERY_QUEUES", "('celeryservice.tasks',) #CELERY_RESULT_BACKEND = os.getenv('CELERY_RESULT_BACKEND', 'redis') CELERY_RESULT_BACKEND = os.getenv('CELERY_RESULT_BACKEND', 'amqp') ##", "#BROKER_HOST = \"localhost\" #BROKER_PORT = 27017 #BROKER_TRANSPORT = 'mongodb' #BROKER_VHOST", "#BROKER_URL = os.getenv('BROKER_URL', 'redis://guest@localhost:6379') #BROKER_HOST = \"localhost\" #BROKER_PORT = 27017", "= 'default' CELERY_QUEUES = ( Queue('default', exchange=Exchange('default'), routing_key='default'), # Queue('aws_uploads',", "task state and results. #CELERY_RESULT_BACKEND = \"mongodb\" #CELERY_MONGODB_BACKEND_SETTINGS = {", "27017 #BROKER_TRANSPORT = 'mongodb' #BROKER_VHOST = 'celery' CELERY_DEFAULT_QUEUE = 'default'", "#CELERY_RESULT_BACKEND = \"mongodb\" #CELERY_MONGODB_BACKEND_SETTINGS = { # \"host\": \"localhost\", #", "= 'celery' CELERY_DEFAULT_QUEUE = 'default' CELERY_QUEUES = ( Queue('default', exchange=Exchange('default'),", "= { # \"host\": \"localhost\", # \"port\": 27017, # \"database\":", "store task state and results. #CELERY_RESULT_BACKEND = \"mongodb\" #CELERY_MONGODB_BACKEND_SETTINGS =", "= 'default' CELERY_DEFAULT_EXCHANGE_TYPE = 'direct' CELERY_DEFAULT_ROUTING_KEY = 'default' CELERY_IMPORTS =", "= 27017 #BROKER_TRANSPORT = 'mongodb' #BROKER_VHOST = 'celery' CELERY_DEFAULT_QUEUE =", "CELERY_IMPORTS = ('celeryservice.tasks',) #CELERY_RESULT_BACKEND = os.getenv('CELERY_RESULT_BACKEND', 'redis') CELERY_RESULT_BACKEND = os.getenv('CELERY_RESULT_BACKEND',", "Queue, Exchange ## Broker settings. BROKER_URL = os.getenv('BROKER_URL', 'amqp://guest:guest@localhost:5672') #BROKER_URL", "Using the database to store task state and results. #CELERY_RESULT_BACKEND", "{ # \"host\": \"localhost\", # \"port\": 27017, # \"database\": \"celery\",", "settings. 
BROKER_URL = os.getenv('BROKER_URL', 'amqp://guest:guest@localhost:5672') #BROKER_URL = \"amqp://guest:guest@localhost:5672/\" #BROKER_URL =", "from kombu import Queue, Exchange ## Broker settings. BROKER_URL =", "#CELERY_MONGODB_BACKEND_SETTINGS = { # \"host\": \"localhost\", # \"port\": 27017, #", "#CELERY_RESULT_BACKEND = os.getenv('CELERY_RESULT_BACKEND', 'redis') CELERY_RESULT_BACKEND = os.getenv('CELERY_RESULT_BACKEND', 'amqp') ## Using", "CELERY_QUEUES = ( Queue('default', exchange=Exchange('default'), routing_key='default'), # Queue('aws_uploads', routing_key='video.uploads'), )", "= 'direct' CELERY_DEFAULT_ROUTING_KEY = 'default' CELERY_IMPORTS = ('celeryservice.tasks',) #CELERY_RESULT_BACKEND =", "Queue('default', exchange=Exchange('default'), routing_key='default'), # Queue('aws_uploads', routing_key='video.uploads'), ) CELERY_DEFAULT_EXCHANGE = 'default'", "the database to store task state and results. #CELERY_RESULT_BACKEND =", "'mongodb' #BROKER_VHOST = 'celery' CELERY_DEFAULT_QUEUE = 'default' CELERY_QUEUES = (", "os.getenv('CELERY_RESULT_BACKEND', 'amqp') ## Using the database to store task state", "and results. #CELERY_RESULT_BACKEND = \"mongodb\" #CELERY_MONGODB_BACKEND_SETTINGS = { # \"host\":", "<filename>data/train/python/be1d04203f18e6f16b60a723e614122b48a08671celeryconfig.py import os from kombu import Queue, Exchange ## Broker", "## Broker settings. BROKER_URL = os.getenv('BROKER_URL', 'amqp://guest:guest@localhost:5672') #BROKER_URL = \"amqp://guest:guest@localhost:5672/\"", "Exchange ## Broker settings. BROKER_URL = os.getenv('BROKER_URL', 'amqp://guest:guest@localhost:5672') #BROKER_URL =", "= ( Queue('default', exchange=Exchange('default'), routing_key='default'), # Queue('aws_uploads', routing_key='video.uploads'), ) CELERY_DEFAULT_EXCHANGE", "\"host\": \"localhost\", # \"port\": 27017, # \"database\": \"celery\", # \"taskmeta_collection\":", "## Using the database to store task state and results.", "'direct' CELERY_DEFAULT_ROUTING_KEY = 'default' CELERY_IMPORTS = ('celeryservice.tasks',) #CELERY_RESULT_BACKEND = os.getenv('CELERY_RESULT_BACKEND',", "\"localhost\", # \"port\": 27017, # \"database\": \"celery\", # \"taskmeta_collection\": \"celery_taskmeta\",", "Queue('aws_uploads', routing_key='video.uploads'), ) CELERY_DEFAULT_EXCHANGE = 'default' CELERY_DEFAULT_EXCHANGE_TYPE = 'direct' CELERY_DEFAULT_ROUTING_KEY", ") CELERY_DEFAULT_EXCHANGE = 'default' CELERY_DEFAULT_EXCHANGE_TYPE = 'direct' CELERY_DEFAULT_ROUTING_KEY = 'default'", "'redis://guest@localhost:6379') #BROKER_HOST = \"localhost\" #BROKER_PORT = 27017 #BROKER_TRANSPORT = 'mongodb'", "Broker settings. 
BROKER_URL = os.getenv('BROKER_URL', 'amqp://guest:guest@localhost:5672') #BROKER_URL = \"amqp://guest:guest@localhost:5672/\" #BROKER_URL", "= \"localhost\" #BROKER_PORT = 27017 #BROKER_TRANSPORT = 'mongodb' #BROKER_VHOST =", "CELERY_DEFAULT_QUEUE = 'default' CELERY_QUEUES = ( Queue('default', exchange=Exchange('default'), routing_key='default'), #", "= \"amqp://guest:guest@localhost:5672/\" #BROKER_URL = os.getenv('BROKER_URL', 'redis://guest@localhost:6379') #BROKER_HOST = \"localhost\" #BROKER_PORT", "exchange=Exchange('default'), routing_key='default'), # Queue('aws_uploads', routing_key='video.uploads'), ) CELERY_DEFAULT_EXCHANGE = 'default' CELERY_DEFAULT_EXCHANGE_TYPE", "CELERY_DEFAULT_ROUTING_KEY = 'default' CELERY_IMPORTS = ('celeryservice.tasks',) #CELERY_RESULT_BACKEND = os.getenv('CELERY_RESULT_BACKEND', 'redis')", "'default' CELERY_DEFAULT_EXCHANGE_TYPE = 'direct' CELERY_DEFAULT_ROUTING_KEY = 'default' CELERY_IMPORTS = ('celeryservice.tasks',)", "#BROKER_URL = \"amqp://guest:guest@localhost:5672/\" #BROKER_URL = os.getenv('BROKER_URL', 'redis://guest@localhost:6379') #BROKER_HOST = \"localhost\"", "BROKER_URL = os.getenv('BROKER_URL', 'amqp://guest:guest@localhost:5672') #BROKER_URL = \"amqp://guest:guest@localhost:5672/\" #BROKER_URL = os.getenv('BROKER_URL',", "'redis') CELERY_RESULT_BACKEND = os.getenv('CELERY_RESULT_BACKEND', 'amqp') ## Using the database to", "'default' CELERY_IMPORTS = ('celeryservice.tasks',) #CELERY_RESULT_BACKEND = os.getenv('CELERY_RESULT_BACKEND', 'redis') CELERY_RESULT_BACKEND =", "= os.getenv('CELERY_RESULT_BACKEND', 'redis') CELERY_RESULT_BACKEND = os.getenv('CELERY_RESULT_BACKEND', 'amqp') ## Using the", "state and results. #CELERY_RESULT_BACKEND = \"mongodb\" #CELERY_MONGODB_BACKEND_SETTINGS = { #", "results. #CELERY_RESULT_BACKEND = \"mongodb\" #CELERY_MONGODB_BACKEND_SETTINGS = { # \"host\": \"localhost\",", "# \"port\": 27017, # \"database\": \"celery\", # \"taskmeta_collection\": \"celery_taskmeta\", #}", "os.getenv('BROKER_URL', 'redis://guest@localhost:6379') #BROKER_HOST = \"localhost\" #BROKER_PORT = 27017 #BROKER_TRANSPORT =", "= ('celeryservice.tasks',) #CELERY_RESULT_BACKEND = os.getenv('CELERY_RESULT_BACKEND', 'redis') CELERY_RESULT_BACKEND = os.getenv('CELERY_RESULT_BACKEND', 'amqp')", "CELERY_DEFAULT_EXCHANGE = 'default' CELERY_DEFAULT_EXCHANGE_TYPE = 'direct' CELERY_DEFAULT_ROUTING_KEY = 'default' CELERY_IMPORTS", "routing_key='default'), # Queue('aws_uploads', routing_key='video.uploads'), ) CELERY_DEFAULT_EXCHANGE = 'default' CELERY_DEFAULT_EXCHANGE_TYPE =", "= 'mongodb' #BROKER_VHOST = 'celery' CELERY_DEFAULT_QUEUE = 'default' CELERY_QUEUES =", "to store task state and results. #CELERY_RESULT_BACKEND = \"mongodb\" #CELERY_MONGODB_BACKEND_SETTINGS", "# \"host\": \"localhost\", # \"port\": 27017, # \"database\": \"celery\", #", "'default' CELERY_QUEUES = ( Queue('default', exchange=Exchange('default'), routing_key='default'), # Queue('aws_uploads', routing_key='video.uploads'),", "# Queue('aws_uploads', routing_key='video.uploads'), ) CELERY_DEFAULT_EXCHANGE = 'default' CELERY_DEFAULT_EXCHANGE_TYPE = 'direct'", "= \"mongodb\" #CELERY_MONGODB_BACKEND_SETTINGS = { # \"host\": \"localhost\", # \"port\":", "os.getenv('BROKER_URL', 'amqp://guest:guest@localhost:5672') #BROKER_URL = \"amqp://guest:guest@localhost:5672/\" #BROKER_URL = os.getenv('BROKER_URL', 'redis://guest@localhost:6379') #BROKER_HOST", "import os from kombu import Queue, Exchange ## Broker settings.", "kombu import Queue, Exchange ## Broker settings. 
BROKER_URL = os.getenv('BROKER_URL',", "= os.getenv('BROKER_URL', 'redis://guest@localhost:6379') #BROKER_HOST = \"localhost\" #BROKER_PORT = 27017 #BROKER_TRANSPORT", "#BROKER_VHOST = 'celery' CELERY_DEFAULT_QUEUE = 'default' CELERY_QUEUES = ( Queue('default',", "= os.getenv('BROKER_URL', 'amqp://guest:guest@localhost:5672') #BROKER_URL = \"amqp://guest:guest@localhost:5672/\" #BROKER_URL = os.getenv('BROKER_URL', 'redis://guest@localhost:6379')", "#BROKER_PORT = 27017 #BROKER_TRANSPORT = 'mongodb' #BROKER_VHOST = 'celery' CELERY_DEFAULT_QUEUE", "database to store task state and results. #CELERY_RESULT_BACKEND = \"mongodb\"", "'amqp://guest:guest@localhost:5672') #BROKER_URL = \"amqp://guest:guest@localhost:5672/\" #BROKER_URL = os.getenv('BROKER_URL', 'redis://guest@localhost:6379') #BROKER_HOST =" ]
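# A minimal sketch of how a worker might consume this module (assumptions:
# the file is importable as `celeryconfig` on the PYTHONPATH, and the app
# name `celeryservice` is taken from CELERY_IMPORTS above):
from celery import Celery

app = Celery('celeryservice')
app.config_from_object('celeryconfig')  # picks up BROKER_URL, CELERY_QUEUES, ...

# The worker would then be started with something like:
#   celery -A celeryservice worker -l info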
[ "load client-secrets.json file ...\") secrets_file, cache_file = get_client_secret_filenames() sheets =", "found\" if first is None else first[COL_DATE]) print(\"Last:\", \"<last> not", "local_minutes = int(parts[1]) if half_it: local_hours = local_hours / 2", "enable debug output\"\"\") print(\"\") print(\"Trying to load client-secrets.json file ...\")", "> 1: result += '\\n* ' + sub_tasks[0] # main", "60).zfill(2) total_duration = \"%s:%s\" % (total_hours, total_minutes) test_duration = duration", "not AS_CSV: print(\"*\" * 50) print(\"\") print(\"Valid hours entries: %s\\t[required", "total_minutes = str(total_time_minutes_from_tasks % 60).zfill(2) total_duration = \"%s:%s\" % (total_hours,", "= row[COL_TIME_START] if max_cols >= COL_TIME_START else None time_end =", "= 0, 0 if not AS_CSV: print(\"*\" * 50) print(\"\")", "'' else arg load_sheet_and_read_data(sheets, timesheet_url, date_to_use, user_full_name) print(\"Done.\") if __name__", "continue if DEBUG: print(\"task: %s\" % (t)) print(\"groups: %s\" %", "3 COL_LUNCH = 4 COL_TIME = 5 # includes lunch", "60) + (6 * half_hours) total_time_minutes_from_tasks += minutes other_lines =", "import arrow from gsheets import Sheets CURRENT_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) DEBUG", "use_date = other_date rows = load_first_sheet_rows(api, timesheet_url, use_date) timesheet =", "= os.path.join(os.getcwd(), \"%s.csv\" % (arg_date)) print(\"\") print(\"Found (%d) entries for", "0 work_hours = 0 work_minutes = 0 days = 0", "the row with the first column that has today's date", "from gsheets import Sheets CURRENT_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) DEBUG = os.environ.get('DEBUG',", "8 hours of work, but on half days it is", "arrow.parser.ParserError: other_date = today use_date = other_date rows = load_first_sheet_rows(api,", "timesheet.py 20201130\") print(\"\") print(\"Available commands:\") print(\"- stats: show summed up", "\"\" for index, hour in enumerate(hours): date = dates[index] local_hours,", "%s\" % (csv_filename)) def calc_daily_hours_for_month(api, timesheet_url, arg_date): rows, date =", "dates.append(date) if first is None: first = row else: last", "'\\n* ' + sub_tasks[0] # main task for sub_task in", "None if time_start is None or time_end is None or", "# 1h r = re.compile(r\"([a-zA-Z].+-\\d+)(.*)((?<=\\[).+(?=\\]))\") for row in filtered: max_cols", "total [%(total_hours)s:%(total_minutes)s]) msg = \"\"\" [Daily Report] %(date)s WFH: %(wfh_info)s", "last[COL_DATE]) print(\"\") print(\"Total time in %s: %s\" % (date, total_time))", "found_row[idx].strip() if task: t = task.split('\\n')[0] if '\\n' in task", "else check_row[COL_WORK_FROM_HOME] wfh = wfh.replace(\"Mon\", \"Monday\") wfh = wfh.replace(\"Tue\", \"Tuesday\")", "time_start is None or time_end is None or date is", "# return None def parse_hours(val): try: return arrow.get(val, \"HH:mm\") except", "WFH: %(wfh_info)s Hi, Daily Report for Date: %(date)s %(tasks)s %(notes)s", "= str(minutes % 60).zfill(2) lunch_hours = str(int(float(hours)) - days).zfill(2) print(\"\")", "= str(int(total_time_minutes_from_tasks / 60)).zfill(2) total_minutes = str(total_time_minutes_from_tasks % 60).zfill(2) total_duration", "print(\"- csv: task breakdown for the month and time spend", "print(\"\") print(\"\") # Time: %(start)s - %(end)s (%(duration)s hours total", "None or time_end is None or date is None: continue", "or \"OFFICE\" in notes.upper(): extra_info += \" - Commute to", "(len(check_row) > COL_WEEKDAY and 
check_row[COL_WEEKDAY] or \"\").lower().startswith(\"fr\"): break check_start_index +=", "= 6 def calc(hour, half_it=False, split_char = \":\"): parts =", "60) total_minutes = total_minutes % 60 total_time = \"%d:%d hours:minutes\"", "task else task try: g = r.match(t).groups() except Exception as", "} print(\"Total time for all tasks (%s): %s - %s:%s\"", "if start_val in SPECIAL_VALUES: print(\"You forgot to add your start", "days worked: %s\" % str(days)) print(\"Total hours: %s:%s (with 1", "time.\") return None #if max_cols >= COL_NOTES: # print(\"No notes/tasks", "\"start\": start, \"end\": end, \"duration\": duration, \"wfh_info\": wfh_info, \"tasks\": format_tasks(tasks)", "< COL_WORK_FROM_HOME else check_row[COL_WORK_FROM_HOME] wfh = wfh.replace(\"Mon\", \"Monday\") wfh =", "= csv.writer(f, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL) # f.writerow(['<NAME>', 'Accounting', 'November']) f.writerow([\"username\",", "and minutes for the given/current month\") print(\" use \\\"CSV=1 python", "entry found for %s\" % use_date) def get_timesheet_for_date(rows, date, user_full_name):", "task: sub_tasks = task.split('\\n') if len(sub_tasks) > 1: result +=", "'YYYYMMDD') if the_date.weekday() in [SATURDAY, SUNDAY]: extra_info += \" -", "day_type in SPECIAL_VALUES: time = day_type hours.append(time) dates.append(date) continue elif", "day off, then only count half the time half_day =", "end_val in SPECIAL_VALUES: print(\"You forgot to add your end time.\")", "total_duration != test_duration: print(\"\") print(\"\") print(\"The task times do not", "rows = sheet.values() return rows def load_sheet_and_read_data(api, timesheet_url, commandline, user_full_name):", "in rows if row and str(row[COL_DATE]) == date] if result_rows", ">= COL_MOVE else None notes = row[COL_NOTES] if max_cols >=", "= [row for row in rows if row and str(row[COL_DATE]).startswith(date)]", "[task_number, task_details, task_duration] = g hours, half_hours = calc(task_duration.replace(\"h\", \"\"),", "%(end)s (%(duration)s hours total [%(total_hours)s:%(total_minutes)s]) msg = \"\"\" [Daily Report]", "def get_client_secret_filenames(): filename = os.path.join(CURRENT_PATH, \"client-secrets.json\") cachefile = os.path.join(CURRENT_PATH, \"client-secrets-cache.json\")", "1 is_same_day = None if check_start_index != found_index: # print(\"HA!", "described here: https://github.com/xflr6/gsheets#quickstart\") return filename, cachefile def load_first_sheet_rows(api, timesheet_url, date=arrow.now().format('YYYYMMDD')):", "None or date is None: continue tasks = [] for", "2 local_minutes = local_minutes / 2 return local_hours, local_minutes except:", "= wfh.replace(\"Mon\", \"Monday\") wfh = wfh.replace(\"Tue\", \"Tuesday\") wfh = wfh.replace(\"Wed\",", "only count half the time half_day = 'half' in row[COL_WORK_FROM_HOME]", "= row[COL_NOTES] if max_cols >= COL_NOTES else \"\" if time_start", "as f: f = csv.writer(f, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL) # f.writerow(['<NAME>',", "for now wfh_info = \"at all times, unless mentioned otherwise", "details = \"\"): work_type = \"Meeting\" if \"meeting\" in details.lower()", "= wfh.replace(\", \", \",\").replace(\",\", \" and \") wfh_extra = \"Next", "for the month and time spend on each task\") print(\"\")", "None for row in filtered: max_cols = len(row) time =", "HR\") print(\"- csv: task breakdown for the month and time", "= row else: last = row total_hours, total_minutes, total_time =", "print(\" %s: Off, because %s\" % (worked_date, hours[index])) 
else: pass", "+= local_m actual_h = work_hours # 330 minutes = 6", "print(\"Found (%d) entries for date %s!\" % (len(filtered), date)) minutes", "str(total_time_minutes_from_tasks % 60).zfill(2) total_duration = \"%s:%s\" % (total_hours, total_minutes) test_duration", "half_day = 'half' in row[COL_WORK_FROM_HOME] if half_day: half_days[date] = time", "result = 'Tasks:\\n' for task in tasks: if '\\n' in", "= None for row in filtered: max_cols = len(row) time", "hours[index] in SPECIAL_VALUES: if not AS_CSV: print(\" %s: Off, because", "for spreadsheet URL...\") user_full_name = os.environ.get('USER_FULL_NAME', \"\").strip() if not user_full_name:", "of %s\" % (date, \"whatever\" if time_start == 0 else", "first column that has today's date in it result_rows =", "rows = load_first_sheet_rows(api, timesheet_url, use_date) timesheet = get_timesheet_for_date(rows, use_date, user_full_name)", "for note in notes: result += '\\n* ' + note", "split_char = \":\"): parts = str(hour).split(split_char) try: local_hours = int(parts[0])", "I took half a day off, then only count half", "date is None: continue start_hours, start_minutes = calc(time_start) end_hours, end_minutes", "range(COL_TASKS_START, max_cols): task = row[idx].strip() if task: tasks.append(task) if len(tasks)", "% len(hours)) deduct_work_hours = 0 work_hours = 0 work_minutes =", "%s\" % (task_number, task_details[:-1].strip()) w(task_number, minutes, details.strip()) print(\"\") print(\"CSV output", "variable TIMESHEET_URL for spreadsheet URL...\") timesheet_url = os.environ.get('TIMESHEET_URL', \"\").strip() if", "date_to_use = \"read today\" if arg == '' else arg", "-*- # # from __future__ import print_function import csv import", "time half_day = 'half' in row[COL_WORK_FROM_HOME] if half_day: half_days[date] =", "= row[idx].strip() if task: tasks.append(task) if len(tasks) == 0: print(\"%s:", "environment variable accordingly.\") # print(\"Checking environment variable USER_FULL_NAME for spreadsheet", "== \"1\" AS_CSV = os.environ.get('CSV', \"0\") == \"1\" COL_DATE =", "fix your sheet!\" % (len(result_rows), date)) return None found_row =", "\"\")) if not AS_CSV: print(\"\") print(\"First:\", \"<first> not found\" if", "print(\"\") print(\"CSV output to: %s\" % (csv_filename)) def calc_daily_hours_for_month(api, timesheet_url,", "print(\"\") print(\"Available commands:\") print(\"- stats: show summed up hours and", "else time_start)) continue extra_info = \"\" the_date = arrow.get(str(date), 'YYYYMMDD')", "%s!\" % (len(filtered), date)) dates, hours = [], [] half_days", "os.path.abspath(os.path.dirname(os.path.realpath(__file__))) DEBUG = os.environ.get('DEBUG', \"0\") == \"1\" AS_CSV = os.environ.get('CSV',", "* 60 minutes_day += end_minutes - start_minutes minutes += minutes_day", "+= '\\n* ' + sub_tasks[0] # main task for sub_task", "- work_hours_for_the_day) expected_minutes_accumulated_total = expected_hours_accumulated_total * 60 # hours[index] is", "_load_sheet_data(api, timesheet_url, arg_date) filtered = [row for row in rows", "\"notes\": format_notes(notes) if notes else \"\", \"total_hours\": total_hours, \"total_minutes\": total_minutes,", "+= \" - Commute to office\" minutes_day = abs(end_hours -", "arrow.get(val, \"H:mm\") start = parse_hours(start_val).format(\"HH:mm\") end = parse_hours(end_val).format(\"HH:mm\") duration =", "output\"\"\") print(\"\") print(\"Trying to load client-secrets.json file ...\") secrets_file, cache_file", "AIScanRobo every week [ # 1h r = 
re.compile(r\"([a-zA-Z].+-\\d+)(.*)((?<=\\[).+(?=\\]))\") total_time_minutes_from_tasks", "COL_TIME = 5 # includes lunch COL_TIME_FIXED = 6 #", "days = 0 for row in filtered: max_cols = len(row)", "from home # days were were selected weekday = (found_row[COL_WEEKDAY]", "total_duration = \"%s:%s\" % (total_hours, total_minutes) test_duration = duration if", "total_minutes) test_duration = duration if len(test_duration) <= 4: test_duration =", "given month filtered = [row for row in rows if", "raise Exception(\"Please set the TIMESHEET_URL environment variable accordingly.\") # print(\"Checking", "for idx in range(COL_TASKS_START, max_cols): task = row[idx].strip() if task:", "from __future__ import print_function import csv import os import re", "as stats, except ready to email to HR\") print(\"- csv:", "60) actual_m = work_minutes % 60 if AS_CSV: print(\"%s;%s;\" %", "now = arrow.now() date = now.format('YYYYMM') rows = load_first_sheet_rows(api, timesheet_url,", "(total_hours, total_minutes) expected = 0 actual_h, actual_m = 0, 0", "parse_hours(end_val).format(\"HH:mm\") duration = str(duration_val) notes_str = found_row[COL_NOTES] notes = notes_str.split('\\n')", "2 return local_hours, local_minutes except: if len(parts) == 1: try:", "no_lunch = str(hours_day_without_lunch).zfill(2) + ':' + str(minutes_day).zfill(2) print(\"%s: %s to", "Date: %(date)s %(tasks)s %(notes)s Kind regards, %(user_full_name)s \"\"\".strip() % {", "AIScanRobo every week [ # 1h r = re.compile(r\"([a-zA-Z].+-\\d+)(.*)((?<=\\[).+(?=\\]))\") for", "= str(hour).split(split_char) try: local_hours = int(parts[0]) local_minutes = int(parts[1]) if", "(len(filtered), date)) minutes = 0 days = 0 for row", "%s = %s (without lunch: %s)%s\" % (date, str(time_start).zfill(2), str(time_end).zfill(2),", "+ ':' + str(minutes_day).zfill(2) print(\"%s: %s to %s = %s", "print(\"You forgot to add your end time.\") return None #if", "(t, str(ex))) continue if DEBUG: print(\"task: %s\" % (t)) print(\"groups:", "None or not filtered: return None print(\"\") print(\"Found (%d) entries", "print(\"%s: %s to %s = %s (without lunch: %s)%s\" %", "1h r = re.compile(r\"([a-zA-Z].+-\\d+)(.*)((?<=\\[).+(?=\\]))\") for row in filtered: max_cols =", "t = task.split('\\n')[0] if '\\n' in task else task try:", "quotechar='\"', quoting=csv.QUOTE_MINIMAL) # f.writerow(['<NAME>', 'Accounting', 'November']) f.writerow([\"username\", \"date\", \"task\", \"duration\",", "print(timesheet) print(\"\\n\") else: print(\"No entry found for %s\" % use_date)", "to %s = %s (without lunch: %s)%s\" % (date, str(time_start).zfill(2),", "10 SPECIAL_VALUES = [\"sick\", \"ab\", \"off\", \"wfh\", \"hol\"] SATURDAY =", "print(\"Checking environment variable USER_FULL_NAME for spreadsheet URL...\") user_full_name = os.environ.get('USER_FULL_NAME',", "is only half of 8, aka 4. 
work_hours_for_the_day = 8", "60 total_time_for_date = str(hours_day).zfill(2) + ':' + str(minutes_day).zfill(2) days +=", "want to update AIScanRobo every week [ # 1h r", "calc(hour, date in half_days) total_hours += local_hours total_minutes += local_minutes", "% (hours, half_hours, duration, minutes)) details = \"%s %s\" %", "% len(g)) [task_number, task_details, task_duration] = g hours, half_hours =", "'' result = 'Tasks:\\n' for task in tasks: if '\\n'", "no_lunch, extra_info)) hours = str(minutes / 60).zfill(2) minutes = str(minutes", "in enumerate(hours): date = dates[index] local_hours, local_minutes = calc(hour, date", "result += '\\n\\t' + sub_task result += '\\n' else: result", "below\" # regex: ([a-zA-Z].+-\\d+)(.*)((?<=\\[).+(?=\\])) # text: SCAN-4167 As a developer,", "day_type hours.append(time) dates.append(date) continue elif not tasks: continue # If", "days\" # 2021-01-04 just make this the default for now", "spreadsheet URL...\") user_full_name = os.environ.get('USER_FULL_NAME', \"\").strip() if not user_full_name: print(\"Warning:", "add up! Tasks vs time entered: %s != %s\" %", "date = dates[index] local_hours, local_minutes = calc(hour, date in half_days)", "filtered: return None csv_filename = os.path.join(os.getcwd(), \"%s.csv\" % (arg_date)) print(\"\")", ">= COL_DATE else None if day_type is None: continue if", "expected = 0 actual_h, actual_m = 0, 0 if not", "gsheets import Sheets CURRENT_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) DEBUG = os.environ.get('DEBUG', \"0\")", "to add your end time.\") return None #if max_cols >=", "half_day: half_days[date] = time hours.append(time) dates.append(date) if first is None:", "found_row[COL_NOTES] notes = notes_str.split('\\n') # check the previous Friday entry", "your sheet!\" % (len(result_rows), date)) return None found_row = result_rows[0]", "0 else time_start)) continue extra_info = \"\" the_date = arrow.get(str(date),", "%s to %s = %s (without lunch: %s)%s\" % (date,", "3 else sys.argv[2].strip() arg = \"read today\" if len(sys.argv) <", "a day off, then only count half the time half_day", "def parse_hours(val): try: return arrow.get(val, \"HH:mm\") except arrow.parser.ParserError: return arrow.get(val,", "print(\"Checking environment variable TIMESHEET_URL for spreadsheet URL...\") timesheet_url = os.environ.get('TIMESHEET_URL',", "timesheet_url, date) date_str = str(date.format('YYYYMM')) return (rows, date_str) def export_csv(api,", "print(\"\\n\\n\") print(\"Timesheet for %s\" % (use_date)) print(timesheet) print(\"\\n\") else: print(\"No", "half the time half_day = 'half' in row[COL_WORK_FROM_HOME] if half_day:", "= now.format('YYYYMM') rows = load_first_sheet_rows(api, timesheet_url, date) date_str = str(date.format('YYYYMM'))", "= \"read today\" if arg == '' else arg load_sheet_and_read_data(sheets,", "tasks.append(\"%s %s\\n%s\" % (task_number.strip(), task_details[:-2].strip(), '\\n'.join(other_lines))) def format_tasks(tasks): if not", "found_index): check_row = rows[check_start_index] if (len(check_row) > COL_WEEKDAY and check_row[COL_WEEKDAY]", "work_hours += local_h work_minutes += local_m actual_h = work_hours #", "= 1 COL_TIME_START = 2 COL_TIME_END = 3 COL_LUNCH =", "wfh = wfh.replace(\"Thu\", \"Thursday\") wfh = wfh.replace(\"Fri\", \"Friday\") wfh =", "if day_type is None: continue if day_type in SPECIAL_VALUES: time", "= parse_hours(end_val).format(\"HH:mm\") duration = str(duration_val) notes_str = found_row[COL_NOTES] notes =", "% (date)) sheets = api.get(timesheet_url) 
sheet = sheets.sheets[0] print(u\"Timesheet [%s]", "= [] for idx in range(COL_TASKS_START, max_cols): task = found_row[idx].strip()", "if filtered is None or not filtered: return None if", "[%s] opened. Accessing cell data ...\" % (sheets.title or \"???\",", "print(\"\") # Time: %(start)s - %(end)s (%(duration)s hours total [%(total_hours)s:%(total_minutes)s])", ">= COL_DATE else None if time_start is None or time_end", "str(duration_val) notes_str = found_row[COL_NOTES] notes = notes_str.split('\\n') # check the", "print(\"\") print(\"The task times do not add up! Tasks vs", "print(\"%s: %d tasks found!\" % (date, len(tasks))) for task in", "days += 1 if hours[index] in SPECIAL_VALUES: if not AS_CSV:", "time_start == 0 else time_start)) continue extra_info = \"\" the_date", "task_details[:-2].strip(), '\\n'.join(other_lines))) def format_tasks(tasks): if not tasks: return '' result", "return None found_row = result_rows[0] found_index = rows.index(found_row) start_val =", "minutes += minutes_day hours_day = int(minutes_day / 60) hours_day_without_lunch =", "if worked_at in ['o', 'O'] or \"OFFICE\" in notes.upper(): extra_info", "if not tasks: return '' result = 'Tasks:\\n' for task", "re.compile(r\"([a-zA-Z].+-\\d+)(.*)((?<=\\[).+(?=\\]))\") total_time_minutes_from_tasks = 0 tasks = [] for idx in", "continue print(\"%s: %d tasks found!\" % (date, len(tasks))) for task", "arg == '' else arg load_sheet_and_read_data(sheets, timesheet_url, date_to_use, user_full_name) print(\"Done.\")", "None tasks = [] for idx in range(COL_TASKS_START, max_cols): task", "# print(\"Checking environment variable TIMESHEET_URL for spreadsheet URL...\") timesheet_url =", "make this the default for now wfh_info = \"at all", "set!\") user_full_name = \"<NAME>\" print(\"\") print(\"Usage: python timesheet.py [command|date] [date]\")", "elif arg == \"daily\": calc_daily_hours_for_month(sheets, timesheet_url, date or arrow.now().format('YYYYMM')) elif", "try: return int(parts[0]), 0 except: return 0, 0 def get_client_secret_filenames():", "% (len(filtered), date)) dates, hours = [], [] half_days =", "- 1 minutes_day = minutes_day % 60 total_time_for_date = str(hours_day).zfill(2)", "str(time_start).zfill(2), str(time_end).zfill(2), total_time_for_date, no_lunch, extra_info)) hours = str(minutes / 60).zfill(2)", "1 minutes_day = minutes_day % 60 total_time_for_date = str(hours_day).zfill(2) +", "print(\"Success.\") date = None if len(sys.argv) < 3 else sys.argv[2].strip()", "+= 1 no_lunch = str(hours_day_without_lunch).zfill(2) + ':' + str(minutes_day).zfill(2) print(\"%s:", "= \":\"): parts = str(hour).split(split_char) try: local_hours = int(parts[0]) local_minutes", "mode='w') as f: f = csv.writer(f, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL) #", "if '\\n' in task else task try: g = r.match(t).groups()", "\"client-secrets.json\") cachefile = os.path.join(CURRENT_PATH, \"client-secrets-cache.json\") if not os.path.exists(filename): filename =", "AS_CSV: print(\"%s;%s;\" % (worked_date, hours[index])) else: print(\" %s: %s\\t[%s:00 vs", "SUNDAY = 6 def calc(hour, half_it=False, split_char = \":\"): parts", "# 1h r = re.compile(r\"([a-zA-Z].+-\\d+)(.*)((?<=\\[).+(?=\\]))\") total_time_minutes_from_tasks = 0 tasks =", "%s (without lunch: %s)%s\" % (date, str(time_start).zfill(2), str(time_end).zfill(2), total_time_for_date, no_lunch,", "print(\"Last:\", \"<last> not found\" if last is None else last[COL_DATE])", "% (total_duration, test_duration)) print(\"\") print(\"\") # Time: %(start)s - %(end)s", "1 
COL_TIME_START = 2 COL_TIME_END = 3 COL_LUNCH = 4", "60) hours_day_without_lunch = hours_day - 1 minutes_day = minutes_day %", "continue # If it was a half day, meaning I", "= row[COL_MOVE] if max_cols >= COL_MOVE else None notes =", "return None csv_filename = os.path.join(os.getcwd(), \"%s.csv\" % (arg_date)) print(\"\") print(\"Found", "\"<NAME>\" print(\"\") print(\"Usage: python timesheet.py [command|date] [date]\") print(\"Example: python timesheet.py", "else None if time_start is None or time_end is None", "result = 'Additional Notes:\\n' for note in notes: result +=", "CSV\") print(\"- daily: same as stats, except ready to email", "(6 * half_hours) total_time_minutes_from_tasks += minutes other_lines = task.split('\\n')[1:] tasks.append(\"%s", "in sub_tasks[1:]: # actual sub tasks result += '\\n\\t' +", "= other_date rows = load_first_sheet_rows(api, timesheet_url, use_date) timesheet = get_timesheet_for_date(rows,", "no tasks found! %s\" % (date, time_start)) continue print(\"%s: %d", "date)) minutes = 0 days = 0 for row in", "minutes)) details = \"%s %s\" % (task_number, task_details[:-1].strip()) w(task_number, minutes,", "(8 - work_hours_for_the_day) expected_minutes_accumulated_total = expected_hours_accumulated_total * 60 # hours[index]", "(%d) entries for date %s!\" % (len(filtered), date)) print(\"Writing to", "minutes = str(minutes % 60).zfill(2) lunch_hours = str(int(float(hours)) - days).zfill(2)", "rows, date = _load_sheet_data(api, timesheet_url, arg_date) # find the rows", "Off, because %s\" % (worked_date, hours[index])) else: pass else: half_day", "len(sys.argv) < 2 else sys.argv[1].strip() if arg == \"stats\": calc_stats(sheets,", "end_hours, end_minutes = calc(time_end) if start_hours == 0: print(\"%s: Day", "cachefile def load_first_sheet_rows(api, timesheet_url, date=arrow.now().format('YYYYMMDD')): print(\"Opening timesheet for %s ...\"", "DAY\") is_same_day = True wfh = u\"\" if len(check_row)-1 <", "sheet = sheets.sheets[0] print(u\"Timesheet [%s] sheet [%s] opened. Accessing cell", "start_minutes = calc(time_start) end_hours, end_minutes = calc(time_end) if start_hours ==", "% (total_hours, total_minutes) expected = 0 actual_h, actual_m = 0,", "csv.writer(f, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL) # f.writerow(['<NAME>', 'Accounting', 'November']) f.writerow([\"username\", \"date\",", "import re import sys import arrow from gsheets import Sheets", "arrow from gsheets import Sheets CURRENT_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) DEBUG =", "\"\"), split_char=\".\") minutes = (hours * 60) + (6 *", "return result total_hours = str(int(total_time_minutes_from_tasks / 60)).zfill(2) total_minutes = str(total_time_minutes_from_tasks", "str(time_end).zfill(2), total_time_for_date, no_lunch, extra_info)) hours = str(minutes / 60).zfill(2) minutes", "= 0 for row in filtered: max_cols = len(row) time", "print(\"%s;%s;\" % (worked_date, hours[index])) else: print(\" %s: %s\\t[%s:00 vs %s:%s]", "%s\" % (worked_date, hours[index], expected_hours_accumulated_total, str(actual_h).zfill(2), str(actual_m).zfill(2), \"Half day\" if", "print(\"- stats: show summed up hours and minutes for the", "0 COL_WEEKDAY = 1 COL_TIME_START = 2 COL_TIME_END = 3", "than one entry (%d) found for date %s! 
def get_client_secret_filenames():
    filename = os.path.join(CURRENT_PATH, "client-secrets.json")
    cachefile = os.path.join(CURRENT_PATH, "client-secrets-cache.json")
    if not os.path.exists(filename):
        filename = os.path.expanduser(os.path.join("~", "client-secrets.json"))
        cachefile = os.path.expanduser(os.path.join("~", "client-secrets-cache.json"))
    if not os.path.exists(filename):
        raise Exception("Please provide a client-secret.json file, as described here: https://github.com/xflr6/gsheets#quickstart")
    return filename, cachefile
def load_first_sheet_rows(api, timesheet_url, date=arrow.now().format('YYYYMMDD')):
    print("Opening timesheet for %s ..." % (date))
    sheets = api.get(timesheet_url)
    sheet = sheets.sheets[0]
    print(u"Timesheet [%s] sheet [%s] opened. Accessing cell data ..." % (sheets.title or "???", sheet.title or "???"))
    rows = sheet.values()
    return rows
def load_sheet_and_read_data(api, timesheet_url, commandline, user_full_name):
    now = arrow.now()
    today = now.format('YYYYMMDD')
    try:
        other_date = arrow.get(commandline, 'YYYYMMDD').format('YYYYMMDD')
    except arrow.parser.ParserError:
        other_date = today
    use_date = other_date

    rows = load_first_sheet_rows(api, timesheet_url, use_date)
    timesheet = get_timesheet_for_date(rows, use_date, user_full_name)
    if timesheet:
        print("\n\n")
        print("Timesheet for %s" % (use_date))
        print(timesheet)
        print("\n")
    else:
        print("No entry found for %s" % use_date)
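# Illustrative call (the URL and name are placeholders, not real values):
#
#   api = Sheets.from_files("client-secrets.json", "client-secrets-cache.json")
#   load_sheet_and_read_data(api, "https://docs.google.com/spreadsheets/d/...",
#                            "20201130", "Jane Doe")
#
# Anything that does not parse as YYYYMMDD (e.g. the default "read today"
# argument) falls back to today's date.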
def get_timesheet_for_date(rows, date, user_full_name):
    # find the row with the first column that has today's date in it
    result_rows = [row for row in rows if row and str(row[COL_DATE]) == date]
    if result_rows is None or not result_rows:
        return None
    if len(result_rows) != 1:
        print("More than one entry (%d) found for date %s! Please fix your sheet!" % (len(result_rows), date))
        return None

    found_row = result_rows[0]
    found_index = rows.index(found_row)

    start_val = found_row[COL_TIME_START]
    end_val = found_row[COL_TIME_END]
    duration_val = found_row[COL_TIME_FIXED]
    max_cols = len(found_row)

    if not start_val:
        if start_val in SPECIAL_VALUES:
            print("You forgot to add your start time.")
        return None
    if not end_val:
        if end_val in SPECIAL_VALUES:
            print("You forgot to add your end time.")
        return None

    #if max_cols >= COL_NOTES:
    #    print("No notes/tasks entered yet.")
    #    return None

    def parse_hours(val):
        return arrow.get(val, "H:mm")

    start = parse_hours(start_val).format("HH:mm")
    end = parse_hours(end_val).format("HH:mm")
    duration = str(duration_val)

    notes_str = found_row[COL_NOTES]
    notes = notes_str.split('\n')

    # check the previous Friday entry (if today is not Friday), to see what
    # work from home days were selected
    weekday = (found_row[COL_WEEKDAY] or "").lower()
    check_start_index = found_index if weekday.startswith("fr") else found_index - 7
    check_row = found_row
    while (check_start_index < found_index):
        check_row = rows[check_start_index]
        if (len(check_row) > COL_WEEKDAY and check_row[COL_WEEKDAY] or "").lower().startswith("fr"):
            break
        check_start_index += 1

    is_same_day = None
    if check_start_index != found_index:
        # print("HA! GOT PREVS FRIDAY.")
        is_same_day = False
    else:
        # print("SAME DAY")
        is_same_day = True

    wfh = u"" if len(check_row) - 1 < COL_WORK_FROM_HOME else check_row[COL_WORK_FROM_HOME]
    wfh = wfh.replace("Mon", "Monday")
    wfh = wfh.replace("Tue", "Tuesday")
    wfh = wfh.replace("Wed", "Wednesday")
    wfh = wfh.replace("Thu", "Thursday")
    wfh = wfh.replace("Fri", "Friday")
    wfh = wfh.replace(", ", ",").replace(",", " and ")
    wfh_extra = "Next week" if is_same_day else "This week"

    wfh_info = """%s %s""" % (wfh_extra, wfh) if wfh != "" else "all days"
    # 2021-01-04 just make this the default for now
    wfh_info = "at all times, unless mentioned otherwise below"
month\") print(\" use \\\"CSV=1 python timesheet.py", "= load_first_sheet_rows(api, timesheet_url, use_date) timesheet = get_timesheet_for_date(rows, use_date, user_full_name) if", "[ # 1h r = re.compile(r\"([a-zA-Z].+-\\d+)(.*)((?<=\\[).+(?=\\]))\") total_time_minutes_from_tasks = 0 tasks", "and str(row[COL_DATE]) == date] if result_rows is None or not", "%s - %s:%s\" % (len(tasks), total_time_minutes_from_tasks, total_hours, total_minutes)) return msg", "None time_end = row[COL_TIME_END] if max_cols >= COL_TIME_END else None", "1h r = re.compile(r\"([a-zA-Z].+-\\d+)(.*)((?<=\\[).+(?=\\]))\") total_time_minutes_from_tasks = 0 tasks = []", "filtered is None or not filtered: return None print(\"\") print(\"Found", "0: print(\"%s: Day off because of %s\" % (date, \"whatever\"", "+= '\\n* ' + task else: result += '\\n* '", "import sys import arrow from gsheets import Sheets CURRENT_PATH =", "CURRENT_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) DEBUG = os.environ.get('DEBUG', \"0\") == \"1\" AS_CSV", "time hours.append(time) dates.append(date) if first is None: first = row", "to: %s\" % (csv_filename)) def calc_daily_hours_for_month(api, timesheet_url, arg_date): rows, date", "# actual sub tasks result += '\\n\\t' + sub_task result", "if max_cols >= COL_DATE else None if day_type is None:", "for task in tasks: if '\\n' in task: sub_tasks =", "has 8 hours of work, but on half days it", "if not timesheet_url: raise Exception(\"Please set the TIMESHEET_URL environment variable", "not result_rows: return None if len(result_rows) != 1: print(\"More than", "half a day off, then only count half the time", "len(test_duration) <= 4: test_duration = \"0%s\" % duration if total_duration", "' + task else: result += '\\n* ' + task", "except: if len(parts) == 1: try: return int(parts[0]), 0 except:", "is None or date is None: continue start_hours, start_minutes =", "DEBUG: print(\"time: %s, %s $ %s $ %s\" % (hours,", "None notes = row[COL_NOTES] if max_cols >= COL_NOTES else \"\"", "do not add up! Tasks vs time entered: %s !=", "found_index if weekday.startswith(\"fr\") else found_index - 7 check_row = found_row", "+= end_minutes - start_minutes minutes += minutes_day hours_day = int(minutes_day", "'half' in row[COL_WORK_FROM_HOME] if half_day: half_days[date] = time hours.append(time) dates.append(date)", "week [1h] # 3 groups: # SCAN-4167 # As a", "timesheet_url, arg_date) filtered = [row for row in rows if", "start = parse_hours(start_val).format(\"HH:mm\") end = parse_hours(end_val).format(\"HH:mm\") duration = str(duration_val) notes_str", "other_date = today use_date = other_date rows = load_first_sheet_rows(api, timesheet_url,", "result_rows = [row for row in rows if row and", "SPECIAL_VALUES: print(\"You forgot to add your start time.\") return None", "= expected_hours_accumulated_total * 60 # hours[index] is the actual time", "e.g. 
    total_hours = str(int(total_time_minutes_from_tasks / 60)).zfill(2)
    total_minutes = str(total_time_minutes_from_tasks % 60).zfill(2)
    total_duration = "%s:%s" % (total_hours, total_minutes)
    test_duration = duration
    if len(test_duration) <= 4:
        test_duration = "0%s" % duration
    if total_duration != test_duration:
        print("")
        print("")
        print("The task times do not add up! Tasks vs time entered: %s != %s" % (total_duration, test_duration))
        print("")
        print("")

    # Time: %(start)s - %(end)s (%(duration)s hours total [%(total_hours)s:%(total_minutes)s])
    msg = """
[Daily Report] %(date)s

WFH: %(wfh_info)s

Hi,

Daily Report for Date: %(date)s

%(tasks)s

%(notes)s

Kind regards,
%(user_full_name)s
""".strip() % {
        "date": date,
        "user_full_name": user_full_name,
        "start": start,
        "end": end,
        "duration": duration,
        "wfh_info": wfh_info,
        "tasks": format_tasks(tasks) if tasks else "",
        "notes": format_notes(notes) if notes else "",
        "total_hours": total_hours,
        "total_minutes": total_minutes,
    }
    print("Total time for all tasks (%s): %s - %s:%s" % (len(tasks), total_time_minutes_from_tasks, total_hours, total_minutes))
    return msg


def _load_sheet_data(api, timesheet_url, arg_date=None):
    try:
        date = arrow.get(arg_date, 'YYYYMM')
    except Exception:  # pylint: disable=W0703
        now = arrow.now()
        date = now.format('YYYYMM')
    rows = load_first_sheet_rows(api, timesheet_url, date)
    date_str = str(date.format('YYYYMM'))
    return (rows, date_str)
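# Example of the date handling above (hypothetical values):
# _load_sheet_data(api, url, "202011") keeps the parsed month, while an
# unparsable or missing arg_date silently falls back to the current month,
# so every caller below can rely on a "YYYYMM" prefix when filtering rows.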
DAY\") is_same_day = True wfh = u\"\" if", "(use_date)) print(timesheet) print(\"\\n\") else: print(\"No entry found for %s\" %", "rows[check_start_index] if (len(check_row) > COL_WEEKDAY and check_row[COL_WEEKDAY] or \"\").lower().startswith(\"fr\"): break", "!= test_duration: print(\"\") print(\"\") print(\"The task times do not add", "use_date, user_full_name) if timesheet: print(\"\\n\\n\") print(\"Timesheet for %s\" % (use_date))", "use \\\"CSV=1 python timesheet.py stats\\\" to format the output\") print(\"", "% (t)) print(\"groups: %s\" % len(g)) [task_number, task_details, duration] =", "total_time_for_date = str(hours_day).zfill(2) + ':' + str(minutes_day).zfill(2) days += 1", "print(\"Warning: USER_FULL_NAME environment variable not set!\") user_full_name = \"<NAME>\" print(\"\")", "(found_row[COL_WEEKDAY] or \"\").lower() check_start_index = found_index if weekday.startswith(\"fr\") else found_index", "return None if not end_val: if end_val in SPECIAL_VALUES: print(\"You", "https://github.com/xflr6/gsheets#quickstart\") return filename, cachefile def load_first_sheet_rows(api, timesheet_url, date=arrow.now().format('YYYYMMDD')): print(\"Opening timesheet", "- half day PTO\" if worked_at in ['o', 'O'] or", "in half_days # each workday has 8 hours of work,", "= 0 expected_hours_accumulated_total = 0 for index, worked_date in enumerate(dates):", "sheets = api.get(timesheet_url) sheet = sheets.sheets[0] print(u\"Timesheet [%s] sheet [%s]", "timesheet_url, use_date) timesheet = get_timesheet_for_date(rows, use_date, user_full_name) if timesheet: print(\"\\n\\n\")", "#if max_cols >= COL_NOTES: # print(\"No notes/tasks entered yet.\") #", "(worked_date, hours[index])) else: pass else: half_day = worked_date in half_days", "date=arrow.now().format('YYYYMMDD')): print(\"Opening timesheet for %s ...\" % (date)) sheets =", "to email to HR\") print(\"- csv: task breakdown for the", "%s)%s\" % (date, str(time_start).zfill(2), str(time_end).zfill(2), total_time_for_date, no_lunch, extra_info)) hours =", "vs actual]\" % len(hours)) deduct_work_hours = 0 work_hours = 0", "selected weekday = (found_row[COL_WEEKDAY] or \"\").lower() check_start_index = found_index if", "sys.argv[1].strip() if arg == \"stats\": calc_stats(sheets, timesheet_url, date or arrow.now().format('YYYYMM'))", "\"all days\" # 2021-01-04 just make this the default for", "day, meaning I took half a day off, then only", "half_days[date] = time hours.append(time) dates.append(date) if first is None: first", "\"\"\"%s %s\"\"\" % (wfh_extra, wfh) if wfh != \"\" else", "\"\" the_date = arrow.get(str(date), 'YYYYMMDD') if the_date.weekday() in [SATURDAY, SUNDAY]:", "expected_hours_accumulated_total = 0 for index, worked_date in enumerate(dates): days +=", "task\") print(\"\") print(\"\"\"Tip: use \"DEBUG=1 timesheet <parameter>\" to enable debug", "= calc(task_duration.replace(\"h\", \"\"), split_char=\".\") minutes = (hours * 60) +", "in SPECIAL_VALUES: if not AS_CSV: print(\" %s: Off, because %s\"", "return filename, cachefile def load_first_sheet_rows(api, timesheet_url, date=arrow.now().format('YYYYMMDD')): print(\"Opening timesheet for", "7 check_row = found_row while (check_start_index < found_index): check_row =", "# -*- coding: utf-8 -*- # # from __future__ import", "str(hours_day).zfill(2) + ':' + str(minutes_day).zfill(2) days += 1 no_lunch =", "days were were selected weekday = (found_row[COL_WEEKDAY] or \"\").lower() check_start_index", "print(\"\") print(\"Total time in %s: %s\" % (date, total_time)) print(\"\")", "= 4 
COL_TIME = 5 # includes lunch COL_TIME_FIXED =", "all times, unless mentioned otherwise below\" # regex: ([a-zA-Z].+-\\d+)(.*)((?<=\\[).+(?=\\])) #", "ready to email to HR\") print(\"- csv: task breakdown for", "max_cols): task = row[idx].strip() if task: tasks.append(task) day_type = row[COL_TIME_START]", ">= COL_TIME_START else None time_end = row[COL_TIME_END] if max_cols >=", "if not start_val: if start_val in SPECIAL_VALUES: print(\"You forgot to", "= row[COL_TIME_START] if max_cols >= COL_TIME_START else None date =", "%s\" % (date, \"whatever\" if time_start == 0 else time_start))", "load_first_sheet_rows(api, timesheet_url, date=arrow.now().format('YYYYMMDD')): print(\"Opening timesheet for %s ...\" % (date))", "== date] if result_rows is None or not result_rows: return", "f.writerow(['<NAME>', 'Accounting', 'November']) f.writerow([\"username\", \"date\", \"task\", \"duration\", \"work_type\", \"details\"]) def", "and check_row[COL_WEEKDAY] or \"\").lower().startswith(\"fr\"): break check_start_index += 1 is_same_day =", "= arrow.now() date = now.format('YYYYMM') rows = load_first_sheet_rows(api, timesheet_url, date)", "days it is only half of 8, aka 4. work_hours_for_the_day", "%s! Please fix your sheet!\" % (len(result_rows), date)) return None", "\"work_type\", \"details\"]) def w(task, duration_minutes, details = \"\"): work_type =", "if half_day: half_days[date] = time hours.append(time) dates.append(date) if first is", "= 6 # does not include lunch COL_MOVE = 7", "__future__ import print_function import csv import os import re import", "date = _load_sheet_data(api, timesheet_url, arg_date) # find the rows for", "lunch COL_TIME_FIXED = 6 # does not include lunch COL_MOVE", "% (worked_date, hours[index])) else: pass else: half_day = worked_date in", "total_minutes = total_minutes % 60 total_time = \"%d:%d hours:minutes\" %", "(csv_filename)) def calc_daily_hours_for_month(api, timesheet_url, arg_date): rows, date = _load_sheet_data(api, timesheet_url,", "test_duration: print(\"\") print(\"\") print(\"The task times do not add up!", "\"Meeting\" if \"meeting\" in details.lower() else \"Development\" # Needed CSV", "local_minutes = calc(hour, date in half_days) total_hours += local_hours total_minutes", "half_days) total_hours += local_hours total_minutes += local_minutes if total_minutes >=", "break check_start_index += 1 is_same_day = None if check_start_index !=", "print(\"\") print(\"First:\", \"<first> not found\" if first is None else", "was a half day, meaning I took half a day", "os.path.join(CURRENT_PATH, \"client-secrets-cache.json\") if not os.path.exists(filename): filename = os.path.expanduser(os.path.join(\"~\", \"client-secrets.json\")) cachefile", "+ ':' + str(minutes_day).zfill(2) days += 1 no_lunch = str(hours_day_without_lunch).zfill(2)", "as CSV\") print(\"- daily: same as stats, except ready to", "* half_hours) total_time_minutes_from_tasks += minutes other_lines = task.split('\\n')[1:] tasks.append(\"%s %s\\n%s\"", "# SCAN-4167 # As a developer, I want to update", "= \"at all times, unless mentioned otherwise below\" # regex:", "except Exception as ex: print(\"ERROR: %s - %s\" % (t,", "for date %s!\" % (len(filtered), date)) print(\"Writing to %s\" %", "format_notes(notes) if notes else \"\", \"total_hours\": total_hours, \"total_minutes\": total_minutes, }", "return None print(\"\") print(\"Found (%d) entries for date %s!\" %", "rows, date = _load_sheet_data(api, timesheet_url, arg_date) filtered = [row for", "if half_day: extra_info += \" - half day PTO\" 
if", "found!\" % (date, len(tasks))) for task in tasks: t =", "format_tasks(tasks): if not tasks: return '' result = 'Tasks:\\n' for", "hours, half_hours = calc(task_duration.replace(\"h\", \"\"), split_char=\".\") minutes = (hours *", "= False else: # print(\"SAME DAY\") is_same_day = True wfh", "spreadsheet URL...\") timesheet_url = os.environ.get('TIMESHEET_URL', \"\").strip() if not timesheet_url: raise", "URL...\") timesheet_url = os.environ.get('TIMESHEET_URL', \"\").strip() if not timesheet_url: raise Exception(\"Please", "or (len(notes) == 1 and not notes[0]): return '' result", "= str(duration_val) notes_str = found_row[COL_NOTES] notes = notes_str.split('\\n') # check", "= (found_row[COL_WEEKDAY] or \"\").lower() check_start_index = found_index if weekday.startswith(\"fr\") else", "= hours_day - 1 minutes_day = minutes_day % 60 total_time_for_date", "(%d) found for date %s! Please fix your sheet!\" %", "AS_CSV: print(\"\") print(\"First:\", \"<first> not found\" if first is None", "and 30 minutes actual_h += int(work_minutes / 60) actual_m =", "\"<first> not found\" if first is None else first[COL_DATE]) print(\"Last:\",", "None #if max_cols >= COL_NOTES: # print(\"No notes/tasks entered yet.\")", "is None or not filtered: return None csv_filename = os.path.join(os.getcwd(),", "for row in filtered: max_cols = len(row) time = row[COL_TIME_FIXED]", "elif arg == \"csv\": export_csv(sheets, timesheet_url, date or arrow.now().format('YYYYMM')) else:", "= calc(hours[index]) work_hours += local_h work_minutes += local_m actual_h =", "# each workday has 8 hours of work, but on", "if max_cols >= COL_DATE else None worked_at = row[COL_MOVE] if", "[ # 1h r = re.compile(r\"([a-zA-Z].+-\\d+)(.*)((?<=\\[).+(?=\\]))\") for row in filtered:", "%s ...\" % (date)) sheets = api.get(timesheet_url) sheet = sheets.sheets[0]", "stats\\\" to format the output\") print(\" as CSV\") print(\"- daily:", "work_minutes += local_m actual_h = work_hours # 330 minutes =", "COL_WEEKDAY = 1 COL_TIME_START = 2 COL_TIME_END = 3 COL_LUNCH", "None if len(sys.argv) < 3 else sys.argv[2].strip() arg = \"read", "email to HR\") print(\"- csv: task breakdown for the month", "total_minutes >= 60: total_hours += (total_minutes / 60) total_minutes =", "total_time_minutes_from_tasks = 0 tasks = [] for idx in range(COL_TASKS_START,", "day PTO\" if worked_at in ['o', 'O'] or \"OFFICE\" in", "calc(hour, half_it=False, split_char = \":\"): parts = str(hour).split(split_char) try: local_hours", "client-secret.json file, as described here: https://github.com/xflr6/gsheets#quickstart\") return filename, cachefile def", "% 60 if AS_CSV: print(\"%s;%s;\" % (worked_date, hours[index])) else: print(\"", "\"Tuesday\") wfh = wfh.replace(\"Wed\", \"Wednesday\") wfh = wfh.replace(\"Thu\", \"Thursday\") wfh", "half day PTO\" if worked_at in ['o', 'O'] or \"OFFICE\"", "if arg == \"stats\": calc_stats(sheets, timesheet_url, date or arrow.now().format('YYYYMM')) elif", "total_hours = str(int(total_time_minutes_from_tasks / 60)).zfill(2) total_minutes = str(total_time_minutes_from_tasks % 60).zfill(2)", "< found_index): check_row = rows[check_start_index] if (len(check_row) > COL_WEEKDAY and", "sheets = Sheets.from_files(secrets_file, cache_file, no_webserver=False) print(\"Success.\") date = None if", "start_hours, start_minutes = calc(time_start) end_hours, end_minutes = calc(time_end) if start_hours", "(%(duration)s hours total [%(total_hours)s:%(total_minutes)s]) msg = \"\"\" [Daily Report] %(date)s", "continue start_hours, start_minutes = 
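# With the w() helper above, the generated CSV looks roughly like this
# (illustrative values only):
#
#   username,date,task,duration,work_type,details
#   daniel,2020.11.30,SCAN-4167,60m,Development,SCAN-4167 As a developer ...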
def calc_daily_hours_for_month(api, timesheet_url, arg_date):
    rows, date = _load_sheet_data(api, timesheet_url, arg_date)
    # find the rows for the given month
    filtered = [row for row in rows if row and str(row[COL_DATE]).startswith(date)]
    if filtered is None or not filtered:
        return None

    print("")
    print("Found (%d) entries for date %s!" % (len(filtered), date))
    minutes = 0
    days = 0
    for row in filtered:
        max_cols = len(row)
        time_start = row[COL_TIME_START] if max_cols >= COL_TIME_START else None
        time_end = row[COL_TIME_END] if max_cols >= COL_TIME_END else None
        date = row[COL_DATE] if max_cols >= COL_DATE else None
        worked_at = row[COL_MOVE] if max_cols >= COL_MOVE else None
        notes = row[COL_NOTES] if max_cols >= COL_NOTES else ""
        if time_start is None or time_end is None or date is None:
            continue
        start_hours, start_minutes = calc(time_start)
        end_hours, end_minutes = calc(time_end)
        if start_hours == 0:
            print("%s: Day off because of %s" % (date, "whatever" if time_start == 0 else time_start))
            continue

        extra_info = ""
        the_date = arrow.get(str(date), 'YYYYMMDD')
        if the_date.weekday() in [SATURDAY, SUNDAY]:
            extra_info += " - Weekend work"
        half_day = 'half' in row[COL_WORK_FROM_HOME]
        if half_day:
            extra_info += " - half day PTO"
        if worked_at in ['o', 'O'] or "OFFICE" in notes.upper():
            extra_info += " - Commute to office"

        minutes_day = abs(end_hours - start_hours) * 60
        minutes_day += end_minutes - start_minutes
        minutes += minutes_day
        hours_day = int(minutes_day / 60)
        hours_day_without_lunch = hours_day - 1
        minutes_day = minutes_day % 60
        total_time_for_date = str(hours_day).zfill(2) + ':' + str(minutes_day).zfill(2)
        days += 1
        no_lunch = str(hours_day_without_lunch).zfill(2) + ':' + str(minutes_day).zfill(2)
        print("%s: %s to %s = %s (without lunch: %s)%s" % (date, str(time_start).zfill(2), str(time_end).zfill(2), total_time_for_date, no_lunch, extra_info))

    hours = str(minutes // 60).zfill(2)
    minutes = str(minutes % 60).zfill(2)
    lunch_hours = str(int(float(hours)) - days).zfill(2)
    print("")
    print("Total days worked: %s" % str(days))
    print("Total hours worked: %s:%s (with 1 hour lunch: %s:%s)" % (hours, minutes, lunch_hours, minutes))
    print("")
def calc_stats(api, timesheet_url, arg_date=None):
    rows, date = _load_sheet_data(api, timesheet_url, arg_date)
    # find the rows for the given month
    filtered = [row for row in rows if row and str(row[COL_DATE]).startswith(date)]
    if filtered is None or not filtered:
        return None

    if not AS_CSV:
        print("")
        print("Found (%d) entries for date %s!" % (len(filtered), date))

    dates, hours = [], []
    half_days = {}
    first = None
    last = None
    for row in filtered:
        max_cols = len(row)
        time = row[COL_TIME_FIXED] if max_cols >= COL_TIME_FIXED else None
        tasks = []
        for idx in range(COL_TASKS_START, max_cols):
            task = row[idx].strip()
            if task:
                tasks.append(task)
        day_type = row[COL_TIME_START] if max_cols >= COL_TIME_START else None
        date = row[COL_DATE] if max_cols >= COL_DATE else None
        if day_type is None:
            continue
        if day_type in SPECIAL_VALUES:
            time = day_type
            hours.append(time)
            dates.append(date)
            continue
        elif not tasks:
            continue
        # If it was a half day, meaning I took half a day off, then only
        # count half the time
        half_day = 'half' in row[COL_WORK_FROM_HOME]
        if half_day:
            half_days[date] = time
        hours.append(time)
        dates.append(date)
        if first is None:
            first = row
        else:
            last = row

    total_hours, total_minutes, total_time = 0, 0, ""
    for index, hour in enumerate(hours):
        date = dates[index]
        local_hours, local_minutes = calc(hour, date in half_days)
        total_hours += local_hours
        total_minutes += local_minutes
        if total_minutes >= 60:
            total_hours += (total_minutes // 60)
            total_minutes = total_minutes % 60
    total_time = "%d:%d hours:minutes" % (total_hours, total_minutes)

    expected = 0
    actual_h, actual_m = 0, 0
    if not AS_CSV:
        print("*" * 50)
        print("")
        print("Valid hours entries: %s\t[required vs actual]" % len(hours))
    deduct_work_hours = 0
    work_hours = 0
    work_minutes = 0
    days = 0
    expected_hours_accumulated_total = 0
    for index, worked_date in enumerate(dates):
        days += 1
        if hours[index] in SPECIAL_VALUES:
            if not AS_CSV:
                print(" %s: Off, because %s" % (worked_date, hours[index]))
            else:
                pass
        else:
            half_day = worked_date in half_days
            # each workday has 8 hours of work, but on half days it is only
            # half of 8, aka 4.
            work_hours_for_the_day = 8 if not half_day else 4
            expected_hours_accumulated_total += 8 - (8 - work_hours_for_the_day)
            expected_minutes_accumulated_total = expected_hours_accumulated_total * 60
            # hours[index] is the actual time worked, e.g. 6:30 means
            # 6 hours and 30 minutes
            local_h, local_m = calc(hours[index])
            work_hours += local_h
            work_minutes += local_m
            actual_h = work_hours
            # 390 minutes = 6 hours and 30 minutes
            actual_h += int(work_minutes / 60)
            actual_m = work_minutes % 60
            if AS_CSV:
                print("%s;%s;" % (worked_date, hours[index]))
            else:
                print(" %s: %s\t[%s:00 vs %s:%s] %s" % (worked_date, hours[index], expected_hours_accumulated_total, str(actual_h).zfill(2), str(actual_m).zfill(2), "Half day" if half_day else ""))

    if not AS_CSV:
        print("")
        print("First:", "<first> not found" if first is None else first[COL_DATE])
        print("Last:", "<last> not found" if last is None else last[COL_DATE])
        print("")
        print("Total time in %s: %s" % (date, total_time))
        print("")
        print("*" * 50)
= 1 COL_TIME_START = 2", "week\" if is_same_day else \"This week\" wfh_info = \"\"\"%s %s\"\"\"", "print(\"Usage: python timesheet.py [command|date] [date]\") print(\"Example: python timesheet.py stats 202011\")", "if notes else \"\", \"total_hours\": total_hours, \"total_minutes\": total_minutes, } print(\"Total", "Friday), to see what work from home # days were", "def main(): # print(\"Checking environment variable TIMESHEET_URL for spreadsheet URL...\")", "else \"This week\" wfh_info = \"\"\"%s %s\"\"\" % (wfh_extra, wfh)", "u\"\" if len(check_row)-1 < COL_WORK_FROM_HOME else check_row[COL_WORK_FROM_HOME] wfh = wfh.replace(\"Mon\",", "_load_sheet_data(api, timesheet_url, arg_date=None): try: date = arrow.get(arg_date, 'YYYYMM') except Exception:", "# includes lunch COL_TIME_FIXED = 6 # does not include", "# As a developer, I want to update AIScanRobo every", "it is only half of 8, aka 4. work_hours_for_the_day =", "= local_minutes / 2 return local_hours, local_minutes except: if len(parts)", "= worked_date in half_days # each workday has 8 hours", "if timesheet: print(\"\\n\\n\") print(\"Timesheet for %s\" % (use_date)) print(timesheet) print(\"\\n\")", "# find the rows for the given month filtered =", "= 0 days = 0 for row in filtered: max_cols", "max_cols >= COL_TIME_START else None time_end = row[COL_TIME_END] if max_cols", "minutes_day += end_minutes - start_minutes minutes += minutes_day hours_day =", "test_duration = \"0%s\" % duration if total_duration != test_duration: print(\"\")", "tasks found!\" % (date, len(tasks))) for task in tasks: t", "%s\" % (t)) print(\"groups: %s\" % len(g)) [task_number, task_details, task_duration]", "hours_day_without_lunch = hours_day - 1 minutes_day = minutes_day % 60", "\"???\")) rows = sheet.values() return rows def load_sheet_and_read_data(api, timesheet_url, commandline,", "== 1 and not notes[0]): return '' result = 'Additional", "calc_daily_hours_for_month(sheets, timesheet_url, date or arrow.now().format('YYYYMM')) elif arg == \"csv\": export_csv(sheets,", "def get_timesheet_for_date(rows, date, user_full_name): # find the row with the", "\"%s %s\" % (task_number, task_details[:-1].strip()) w(task_number, minutes, details.strip()) print(\"\") print(\"CSV", "local_hours = int(parts[0]) local_minutes = int(parts[1]) if half_it: local_hours =", "...\" % (date)) sheets = api.get(timesheet_url) sheet = sheets.sheets[0] print(u\"Timesheet", "or date is None: continue tasks = [] for idx", "== 0 else time_start)) continue extra_info = \"\" the_date =", "else: last = row total_hours, total_minutes, total_time = 0, 0,", "as described here: https://github.com/xflr6/gsheets#quickstart\") return filename, cachefile def load_first_sheet_rows(api, timesheet_url,", "coding: utf-8 -*- # # from __future__ import print_function import", "regex: ([a-zA-Z].+-\\d+)(.*)((?<=\\[).+(?=\\])) # text: SCAN-4167 As a developer, I want", "= rows.index(found_row) start_val = found_row[COL_TIME_START] end_val = found_row[COL_TIME_END] duration_val =", "(check_start_index < found_index): check_row = rows[check_start_index] if (len(check_row) > COL_WEEKDAY", "to update AIScanRobo every week [1h] # 3 groups: #", "os.path.expanduser(os.path.join(\"~\", \"client-secrets-cache.json\")) if not os.path.exists(filename): raise Exception(\"Please provide a client-secret.json", "not found\" if first is None else first[COL_DATE]) print(\"Last:\", \"<last>", "\"0\") == \"1\" AS_CSV = os.environ.get('CSV', \"0\") == \"1\" COL_DATE", "details = \"%s %s\" % (task_number, 
def get_client_secret_filenames():
    filename = os.path.join(CURRENT_PATH, "client-secrets.json")
    cachefile = os.path.join(CURRENT_PATH, "client-secrets-cache.json")
    if not os.path.exists(filename):
        filename = os.path.expanduser(os.path.join("~", "client-secrets.json"))
        cachefile = os.path.expanduser(os.path.join("~", "client-secrets-cache.json"))
    if not os.path.exists(filename):
        raise Exception("Please provide a client-secret.json file, as described "
                        "here: https://github.com/xflr6/gsheets#quickstart")
    return filename, cachefile


def load_first_sheet_rows(api, timesheet_url, date=arrow.now().format('YYYYMMDD')):
    print("Opening timesheet for %s ..." % (date))
    sheets = api.get(timesheet_url)
    sheet = sheets.sheets[0]
    print(u"Timesheet [%s] sheet [%s] opened. Accessing cell data ..."
          % (sheets.title or "???", sheet.title or "???"))
    rows = sheet.values()
    return rows


def load_sheet_and_read_data(api, timesheet_url, commandline, user_full_name):
    now = arrow.now()
    today = now.format('YYYYMMDD')
    try:
        other_date = arrow.get(commandline, 'YYYYMMDD').format('YYYYMMDD')
    except arrow.parser.ParserError:
        other_date = today
    use_date = other_date

    rows = load_first_sheet_rows(api, timesheet_url, use_date)
    timesheet = get_timesheet_for_date(rows, use_date, user_full_name)

    if timesheet:
        print("\n\n")
        print("Timesheet for %s" % (use_date))
        print(timesheet)
        print("\n")
    else:
        print("No entry found for %s" % use_date)
GOT PREVS FRIDAY.\") is_same_day = False else: # print(\"SAME", "file ...\") secrets_file, cache_file = get_client_secret_filenames() sheets = Sheets.from_files(secrets_file, cache_file,", "if len(check_row)-1 < COL_WORK_FROM_HOME else check_row[COL_WORK_FROM_HOME] wfh = wfh.replace(\"Mon\", \"Monday\")", "extra_info = \"\" the_date = arrow.get(str(date), 'YYYYMMDD') if the_date.weekday() in", "other_lines = task.split('\\n')[1:] tasks.append(\"%s %s\\n%s\" % (task_number.strip(), task_details[:-2].strip(), '\\n'.join(other_lines))) def", "print(\"SAME DAY\") is_same_day = True wfh = u\"\" if len(check_row)-1", "USER_FULL_NAME environment variable not set!\") user_full_name = \"<NAME>\" print(\"\") print(\"Usage:", "= work_minutes % 60 if AS_CSV: print(\"%s;%s;\" % (worked_date, hours[index]))", "wfh.replace(\"Tue\", \"Tuesday\") wfh = wfh.replace(\"Wed\", \"Wednesday\") wfh = wfh.replace(\"Thu\", \"Thursday\")", "+ sub_task result += '\\n' else: result += '\\n* '", "= found_row while (check_start_index < found_index): check_row = rows[check_start_index] if", "\"%s.csv\" % (arg_date)) print(\"\") print(\"Found (%d) entries for date %s!\"", "hours.append(time) dates.append(date) continue elif not tasks: continue # If it", "off, then only count half the time half_day = 'half'", "first[COL_DATE]) print(\"Last:\", \"<last> not found\" if last is None else", "as ex: print(\"ERROR: %s - %s\" % (t, str(ex))) continue", "= total_minutes % 60 total_time = \"%d:%d hours:minutes\" % (total_hours,", "with open(csv_filename, mode='w') as f: f = csv.writer(f, delimiter=',', quotechar='\"',", "the rows for the given month filtered = [row for", "in notes.upper(): extra_info += \" - Commute to office\" minutes_day", "and 30 minutes local_h, local_m = calc(hours[index]) work_hours += local_h", "+= minutes_day hours_day = int(minutes_day / 60) hours_day_without_lunch = hours_day", "that has today's date in it result_rows = [row for", "20201130\") print(\"\") print(\"Available commands:\") print(\"- stats: show summed up hours", "date)) dates, hours = [], [] half_days = {} first", "use_date) timesheet = get_timesheet_for_date(rows, use_date, user_full_name) if timesheet: print(\"\\n\\n\") print(\"Timesheet", "(date, \"whatever\" if time_start == 0 else time_start)) continue extra_info", "# f.writerow(['<NAME>', 'Accounting', 'November']) f.writerow([\"username\", \"date\", \"task\", \"duration\", \"work_type\", \"details\"])", "= True wfh = u\"\" if len(check_row)-1 < COL_WORK_FROM_HOME else", "idx in range(COL_TASKS_START, max_cols): task = row[idx].strip() if task: tasks.append(task)", "g hours, half_hours = calc(task_duration.replace(\"h\", \"\"), split_char=\".\") minutes = (hours", "[date]\") print(\"Example: python timesheet.py stats 202011\") print(\"Example: python timesheet.py 20201130\")", "\"wfh_info\": wfh_info, \"tasks\": format_tasks(tasks) if tasks else \"\", \"notes\": format_notes(notes)", "str(minutes_day).zfill(2) days += 1 no_lunch = str(hours_day_without_lunch).zfill(2) + ':' +", "if row and str(row[COL_DATE]) == date] if result_rows is None", "\"date\", \"task\", \"duration\", \"work_type\", \"details\"]) def w(task, duration_minutes, details =", "\" - Weekend work\" half_day = 'half' in row[COL_WORK_FROM_HOME] if", "minutes_day = minutes_day % 60 total_time_for_date = str(hours_day).zfill(2) + ':'", "if task: tasks.append(task) day_type = row[COL_TIME_START] if max_cols >= COL_TIME_START", "result += '\\n* ' + note return result total_hours =", "\"\" else \"all days\" # 2021-01-04 just 
make this the", "or arrow.now().format('YYYYMM')) else: date_to_use = \"read today\" if arg ==", "print(\"%s: Day off because of %s\" % (date, \"whatever\" if", "(task_number, task_details[:-1].strip()) w(task_number, minutes, details.strip()) print(\"\") print(\"CSV output to: %s\"", "if hours[index] in SPECIAL_VALUES: if not AS_CSV: print(\" %s: Off,", "variable accordingly.\") # print(\"Checking environment variable USER_FULL_NAME for spreadsheet URL...\")", "check_row = found_row while (check_start_index < found_index): check_row = rows[check_start_index]", "because of %s\" % (date, \"whatever\" if time_start == 0", "wfh.replace(\"Fri\", \"Friday\") wfh = wfh.replace(\", \", \",\").replace(\",\", \" and \")", "= Sheets.from_files(secrets_file, cache_file, no_webserver=False) print(\"Success.\") date = None if len(sys.argv)", "date %s!\" % (len(filtered), date)) minutes = 0 days =", "= calc(duration.replace(\"h\", \"\"), split_char=\".\") minutes = (hours * 60) +", "= str(minutes / 60).zfill(2) minutes = str(minutes % 60).zfill(2) lunch_hours", "%s\" % (date, time_start)) continue print(\"%s: %d tasks found!\" %", "(%s): %s - %s:%s\" % (len(tasks), total_time_minutes_from_tasks, total_hours, total_minutes)) return", "else arg load_sheet_and_read_data(sheets, timesheet_url, date_to_use, user_full_name) print(\"Done.\") if __name__ ==", "None or not result_rows: return None if len(result_rows) != 1:", "time_start)) continue extra_info = \"\" the_date = arrow.get(str(date), 'YYYYMMDD') if", "[row for row in rows if row and str(row[COL_DATE]) ==", "delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL) # f.writerow(['<NAME>', 'Accounting', 'November']) f.writerow([\"username\", \"date\", \"task\",", "%s, %s $ %s $ %s\" % (hours, half_hours, duration,", "row[COL_TIME_FIXED] if max_cols >= COL_TIME_FIXED else None tasks = []", "max_cols >= COL_TIME_END else None date = row[COL_DATE] if max_cols", "%s:%s)\" % (hours, minutes, lunch_hours, minutes)) print(\"\") def calc_stats(api, timesheet_url,", "in row[COL_WORK_FROM_HOME] if half_day: extra_info += \" - half day", "last = None for row in filtered: max_cols = len(row)", "\"duration\": duration, \"wfh_info\": wfh_info, \"tasks\": format_tasks(tasks) if tasks else \"\",", "vs %s:%s] %s\" % (worked_date, hours[index], expected_hours_accumulated_total, str(actual_h).zfill(2), str(actual_m).zfill(2), \"Half", "elif not tasks: continue # If it was a half", "if start_hours == 0: print(\"%s: Day off because of %s\"", "arg == \"stats\": calc_stats(sheets, timesheet_url, date or arrow.now().format('YYYYMM')) elif arg", "+ task return result def format_notes(notes): if not notes or", "sub_task result += '\\n' else: result += '\\n* ' +", "(rows, date_str) def export_csv(api, timesheet_url, arg_date): rows, date = _load_sheet_data(api,", "\"read today\" if len(sys.argv) < 2 else sys.argv[1].strip() if arg", "# days were were selected weekday = (found_row[COL_WEEKDAY] or \"\").lower()", "!= %s\" % (total_duration, test_duration)) print(\"\") print(\"\") # Time: %(start)s", "end = parse_hours(end_val).format(\"HH:mm\") duration = str(duration_val) notes_str = found_row[COL_NOTES] notes", "if check_start_index != found_index: # print(\"HA! 
GOT PREVS FRIDAY.\") is_same_day", "csv import os import re import sys import arrow from", "calc_daily_hours_for_month(api, timesheet_url, arg_date): rows, date = _load_sheet_data(api, timesheet_url, arg_date) filtered", "Weekend work\" half_day = 'half' in row[COL_WORK_FROM_HOME] if half_day: extra_info", "(len(tasks), total_time_minutes_from_tasks, total_hours, total_minutes)) return msg def _load_sheet_data(api, timesheet_url, arg_date=None):", "[] for idx in range(COL_TASKS_START, max_cols): task = found_row[idx].strip() if", "task return result def format_notes(notes): if not notes or (len(notes)", "60 # hours[index] is the actual time worked, e.g. 6:30", "start_val in SPECIAL_VALUES: print(\"You forgot to add your start time.\")", "minutes actual_h += int(work_minutes / 60) actual_m = work_minutes %", "means 6 hours and 30 minutes local_h, local_m = calc(hours[index])", ">= COL_TIME_FIXED else None time_start = row[COL_TIME_START] if max_cols >=", "= [row for row in rows if row and str(row[COL_DATE])", "is_same_day = None if check_start_index != found_index: # print(\"HA! GOT", "end_minutes = calc(time_end) if start_hours == 0: print(\"%s: Day off", "% (worked_date, hours[index], expected_hours_accumulated_total, str(actual_h).zfill(2), str(actual_m).zfill(2), \"Half day\" if half_day", "+= '\\n* ' + note return result total_hours = str(int(total_time_minutes_from_tasks", "expected_hours_accumulated_total, str(actual_h).zfill(2), str(actual_m).zfill(2), \"Half day\" if half_day else \"\")) if", "else \"\" if time_start is None or time_end is None", "if time_start == 0 else time_start)) continue extra_info = \"\"", "developer, I want to update AIScanRobo every week [ #", "date %s! Please fix your sheet!\" % (len(result_rows), date)) return", "wfh = u\"\" if len(check_row)-1 < COL_WORK_FROM_HOME else check_row[COL_WORK_FROM_HOME] wfh", "COL_WORK_FROM_HOME = 8 COL_NOTES = 9 COL_TASKS_START = 10 SPECIAL_VALUES", "len(g)) [task_number, task_details, task_duration] = g hours, half_hours = calc(task_duration.replace(\"h\",", "%d tasks found!\" % (date, len(tasks))) for task in tasks:", "what work from home # days were were selected weekday", "USER_FULL_NAME for spreadsheet URL...\") user_full_name = os.environ.get('USER_FULL_NAME', \"\").strip() if not", "date = row[COL_DATE] if max_cols >= COL_DATE else None worked_at", "print(\"Opening timesheet for %s ...\" % (date)) sheets = api.get(timesheet_url)", "Sheets CURRENT_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) DEBUG = os.environ.get('DEBUG', \"0\") == \"1\"", "task else: result += '\\n* ' + task return result", "# from __future__ import print_function import csv import os import", "= row[COL_TIME_FIXED] if max_cols >= COL_TIME_FIXED else None time_start =", "print(\"You forgot to add your start time.\") return None if", "= row[COL_DATE] if max_cols >= COL_DATE else None worked_at =", "60).zfill(2) lunch_hours = str(int(float(hours)) - days).zfill(2) print(\"\") print(\"Total days worked:", "work\" half_day = 'half' in row[COL_WORK_FROM_HOME] if half_day: extra_info +=", "\"DEBUG=1 timesheet <parameter>\" to enable debug output\"\"\") print(\"\") print(\"Trying to", ">= COL_DATE else None worked_at = row[COL_MOVE] if max_cols >=", "50) print(\"\") print(\"Valid hours entries: %s\\t[required vs actual]\" % len(hours))", "actual_h, actual_m = 0, 0 if not AS_CSV: print(\"*\" *", "entered yet.\") # return None def parse_hours(val): try: return arrow.get(val,", "f.writerow([\"daniel\", arrow.get(str(date), 
'YYYYMMDD').format('YYYY.MM.DD'), task, \"%dm\" % (duration_minutes), work_type, details]) #", "days = 0 expected_hours_accumulated_total = 0 for index, worked_date in", "format_notes(notes): if not notes or (len(notes) == 1 and not", "date = None if len(sys.argv) < 3 else sys.argv[2].strip() arg", "SATURDAY = 5 SUNDAY = 6 def calc(hour, half_it=False, split_char", "4. work_hours_for_the_day = 8 if not half_day else 4 expected_hours_accumulated_total", "+= 8 - (8 - work_hours_for_the_day) expected_minutes_accumulated_total = expected_hours_accumulated_total *", "today's date in it result_rows = [row for row in", "= wfh.replace(\"Thu\", \"Thursday\") wfh = wfh.replace(\"Fri\", \"Friday\") wfh = wfh.replace(\",", "= str(hours_day_without_lunch).zfill(2) + ':' + str(minutes_day).zfill(2) print(\"%s: %s to %s", "5 SUNDAY = 6 def calc(hour, half_it=False, split_char = \":\"):", "cache_file, no_webserver=False) print(\"Success.\") date = None if len(sys.argv) < 3", "<= 4: test_duration = \"0%s\" % duration if total_duration !=", "found_index - 7 check_row = found_row while (check_start_index < found_index):", "weekday.startswith(\"fr\") else found_index - 7 check_row = found_row while (check_start_index", "+= '\\n* ' + task return result def format_notes(notes): if", "sub_task in sub_tasks[1:]: # actual sub tasks result += '\\n\\t'", "in tasks: if '\\n' in task: sub_tasks = task.split('\\n') if", "half_day = worked_date in half_days # each workday has 8", "rows.index(found_row) start_val = found_row[COL_TIME_START] end_val = found_row[COL_TIME_END] duration_val = found_row[COL_TIME_FIXED]", "Exception(\"Please set the TIMESHEET_URL environment variable accordingly.\") # print(\"Checking environment", "else sys.argv[1].strip() if arg == \"stats\": calc_stats(sheets, timesheet_url, date or", "result += '\\n* ' + task else: result += '\\n*", "date_str) def export_csv(api, timesheet_url, arg_date): rows, date = _load_sheet_data(api, timesheet_url,", "time in %s: %s\" % (date, total_time)) print(\"\") print(\"*\" *", "= wfh.replace(\"Wed\", \"Wednesday\") wfh = wfh.replace(\"Thu\", \"Thursday\") wfh = wfh.replace(\"Fri\",", "because %s\" % (worked_date, hours[index])) else: pass else: half_day =", "timesheet = get_timesheet_for_date(rows, use_date, user_full_name) if timesheet: print(\"\\n\\n\") print(\"Timesheet for", "False else: # print(\"SAME DAY\") is_same_day = True wfh =", "[\"sick\", \"ab\", \"off\", \"wfh\", \"hol\"] SATURDAY = 5 SUNDAY =", "find the row with the first column that has today's", "entries for date %s!\" % (len(filtered), date)) print(\"Writing to %s\"", "total_hours += local_hours total_minutes += local_minutes if total_minutes >= 60:", "%s: %s\" % (date, total_time)) print(\"\") print(\"*\" * 50) def", "task: t = task.split('\\n')[0] if '\\n' in task else task", "hours[index], expected_hours_accumulated_total, str(actual_h).zfill(2), str(actual_m).zfill(2), \"Half day\" if half_day else \"\"))", "[SATURDAY, SUNDAY]: extra_info += \" - Weekend work\" half_day =", "breakdown for the month and time spend on each task\")", "As a developer, I want to update AIScanRobo every week", "work, but on half days it is only half of", "actual time worked, e.g. 
6:30 means 6 hours and 30", "lunch COL_MOVE = 7 COL_WORK_FROM_HOME = 8 COL_NOTES = 9", "is None: continue start_hours, start_minutes = calc(time_start) end_hours, end_minutes =", "= notes_str.split('\\n') # check the previous Friday entry (if today", "on each task\") print(\"\") print(\"\"\"Tip: use \"DEBUG=1 timesheet <parameter>\" to", "% (len(filtered), date)) print(\"Writing to %s\" % (csv_filename)) with open(csv_filename,", "took half a day off, then only count half the", "\"<last> not found\" if last is None else last[COL_DATE]) print(\"\")", "continue if day_type in SPECIAL_VALUES: time = day_type hours.append(time) dates.append(date)", "use_date) def get_timesheet_for_date(rows, date, user_full_name): # find the row with", "in range(COL_TASKS_START, max_cols): task = row[idx].strip() if task: tasks.append(task) if", "but on half days it is only half of 8,", "% (arg_date)) print(\"\") print(\"Found (%d) entries for date %s!\" %", "is None or not filtered: return None if not AS_CSV:", "output to: %s\" % (csv_filename)) def calc_daily_hours_for_month(api, timesheet_url, arg_date): rows,", "environment variable TIMESHEET_URL for spreadsheet URL...\") timesheet_url = os.environ.get('TIMESHEET_URL', \"\").strip()", "environment variable USER_FULL_NAME for spreadsheet URL...\") user_full_name = os.environ.get('USER_FULL_NAME', \"\").strip()", "COL_WEEKDAY and check_row[COL_WEEKDAY] or \"\").lower().startswith(\"fr\"): break check_start_index += 1 is_same_day", "= os.environ.get('USER_FULL_NAME', \"\").strip() if not user_full_name: print(\"Warning: USER_FULL_NAME environment variable", "not user_full_name: print(\"Warning: USER_FULL_NAME environment variable not set!\") user_full_name =", "check_row[COL_WORK_FROM_HOME] wfh = wfh.replace(\"Mon\", \"Monday\") wfh = wfh.replace(\"Tue\", \"Tuesday\") wfh", "COL_TIME_FIXED = 6 # does not include lunch COL_MOVE =", "None: continue start_hours, start_minutes = calc(time_start) end_hours, end_minutes = calc(time_end)", "= 8 COL_NOTES = 9 COL_TASKS_START = 10 SPECIAL_VALUES =", "\"Monday\") wfh = wfh.replace(\"Tue\", \"Tuesday\") wfh = wfh.replace(\"Wed\", \"Wednesday\") wfh", "'\\n* ' + note return result total_hours = str(int(total_time_minutes_from_tasks /", "notes else \"\", \"total_hours\": total_hours, \"total_minutes\": total_minutes, } print(\"Total time", "filtered is None or not filtered: return None csv_filename =", "extra_info += \" - Weekend work\" half_day = 'half' in", "\"total_hours\": total_hours, \"total_minutes\": total_minutes, } print(\"Total time for all tasks", "% (date, total_time)) print(\"\") print(\"*\" * 50) def main(): #", "print(\"\") def calc_stats(api, timesheet_url, arg_date=None): rows, date = _load_sheet_data(api, timesheet_url,", "to office\" minutes_day = abs(end_hours - start_hours) * 60 minutes_day", "first = None last = None for row in filtered:", "not filtered: return None csv_filename = os.path.join(os.getcwd(), \"%s.csv\" % (arg_date))", "task, \"%dm\" % (duration_minutes), work_type, details]) # regex: ([a-zA-Z].+-\\d+)(.*)((?<=\\[).+(?=\\])) #", "= row[COL_TIME_END] if max_cols >= COL_TIME_END else None date =", "not timesheet_url: raise Exception(\"Please set the TIMESHEET_URL environment variable accordingly.\")", "use \"DEBUG=1 timesheet <parameter>\" to enable debug output\"\"\") print(\"\") print(\"Trying", "'\\n' in task else task try: g = r.match(t).groups() except", "half of 8, aka 4. 
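# Illustrative sketch of the task regex above. The sample text is the one
# documented in get_timesheet_for_date(); any other cell content would be
# an assumption:
#
#   >>> r = re.compile(r"([a-zA-Z].+-\d+)(.*)((?<=\[).+(?=\]))")
#   >>> r.match("SCAN-4167 As a developer, I want to update AIScanRobo "
#   ...         "every week [1h]").groups()
#   ('SCAN-4167', ' As a developer, I want to update AIScanRobo every week [', '1h')
#
# The third group ("1h") is what feeds calc(): "1h" -> "1" -> (1, 0)
# -> 60 minutes for that task.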
def _load_sheet_data(api, timesheet_url, arg_date=None):
    try:
        date = arrow.get(arg_date, 'YYYYMM')
    except Exception:  # pylint: disable=W0703
        now = arrow.now()
        date = arrow.get(now.format('YYYYMM'), 'YYYYMM')
    rows = load_first_sheet_rows(api, timesheet_url, date)
    date_str = str(date.format('YYYYMM'))
    return (rows, date_str)


def export_csv(api, timesheet_url, arg_date):
    rows, date = _load_sheet_data(api, timesheet_url, arg_date)

    # find the rows for the given month
    filtered = [row for row in rows if row and str(row[COL_DATE]).startswith(date)]
    if not filtered:
        return None

    csv_filename = os.path.join(os.getcwd(), "%s.csv" % (arg_date))
    print("")
    print("Found (%d) entries for date %s!" % (len(filtered), date))
    print("Writing to %s" % (csv_filename))

    with open(csv_filename, mode='w') as csv_file:
        writer = csv.writer(csv_file, delimiter=',', quotechar='"',
                            quoting=csv.QUOTE_MINIMAL)
        # writer.writerow(['<NAME>', 'Accounting', 'November'])
        writer.writerow(["username", "date", "task", "duration", "work_type", "details"])

        def w(task, duration_minutes, details=""):
            work_type = "Meeting" if "meeting" in details.lower() else "Development"
            # Needed CSV columns
            # username|date|task|duration|work_type|details
            writer.writerow(["daniel",
                             arrow.get(str(date), 'YYYYMMDD').format('YYYY.MM.DD'),
                             task, "%dm" % (duration_minutes), work_type, details])

        # Same regex as in get_timesheet_for_date():
        # ([a-zA-Z].+-\d+)(.*)((?<=\[).+(?=\]))
        r = re.compile(r"([a-zA-Z].+-\d+)(.*)((?<=\[).+(?=\]))")
        for row in filtered:
            max_cols = len(row)
            time_start = row[COL_TIME_START] if max_cols > COL_TIME_START else None
            time_end = row[COL_TIME_END] if max_cols > COL_TIME_END else None
            date = row[COL_DATE] if max_cols > COL_DATE else None
            if time_start is None or time_end is None or date is None:
                continue

            tasks = []
            for idx in range(COL_TASKS_START, max_cols):
                task = row[idx].strip()
                if task:
                    tasks.append(task)
            if len(tasks) == 0:
                print("%s: no tasks found! %s" % (date, time_start))
                continue
            print("%s: %d tasks found!" % (date, len(tasks)))

            for task in tasks:
                t = task.split('\n')[0] if '\n' in task else task
                try:
                    g = r.match(t).groups()
                except Exception as ex:
                    print("ERROR: %s - %s" % (t, str(ex)))
                    continue
                if DEBUG:
                    print("task: %s" % (t))
                    print("groups: %s" % len(g))
                [task_number, task_details, duration] = g
                hours, half_hours = calc(duration.replace("h", ""), split_char=".")
                minutes = (hours * 60) + (6 * half_hours)
                if DEBUG:
                    print("time: %s, %s $ %s $ %s" % (hours, half_hours, duration, minutes))
                details = "%s %s" % (task_number, task_details[:-1].strip())
                w(task_number, minutes, details.strip())

    print("")
    print("CSV output to: %s" % (csv_filename))
def calc_daily_hours_for_month(api, timesheet_url, arg_date):
    rows, date = _load_sheet_data(api, timesheet_url, arg_date)

    # find the rows for the given month
    filtered = [row for row in rows if row and str(row[COL_DATE]).startswith(date)]
    if not filtered:
        return None

    if not AS_CSV:
        print("")
        print("Found (%d) entries for date %s!" % (len(filtered), date))

    dates, hours = [], []
    half_days = {}
    first = None
    last = None
    for row in filtered:
        max_cols = len(row)
        time = row[COL_TIME_FIXED] if max_cols > COL_TIME_FIXED else None
        tasks = []
        for idx in range(COL_TASKS_START, max_cols):
            task = row[idx].strip()
            if task:
                tasks.append(task)
        day_type = row[COL_TIME_START] if max_cols > COL_TIME_START else None
        date = row[COL_DATE] if max_cols > COL_DATE else None
        if day_type is None:
            continue
        if day_type in SPECIAL_VALUES:
            time = day_type
            hours.append(time)
            dates.append(date)
            continue
        elif not tasks:
            continue

        # If it was a half day, meaning I took half a day off,
        # then only count half the time.
        half_day = max_cols > COL_WORK_FROM_HOME and 'half' in row[COL_WORK_FROM_HOME]
        if half_day:
            half_days[date] = time

        hours.append(time)
        dates.append(date)
        if first is None:
            first = row
        else:
            last = row

    total_hours, total_minutes = 0, 0
    for index, hour in enumerate(hours):
        date = dates[index]
        local_hours, local_minutes = calc(hour, date in half_days)
        total_hours += local_hours
        total_minutes += local_minutes
        if total_minutes >= 60:
            total_hours += total_minutes // 60
            total_minutes = total_minutes % 60
    total_time = "%d:%d hours:minutes" % (total_hours, total_minutes)

    actual_h, actual_m = 0, 0
    if not AS_CSV:
        print("*" * 50)
        print("")
        print("Valid hours entries: %s\t[required vs actual]" % len(hours))

    work_hours = 0
    work_minutes = 0
    days = 0
    expected_hours_accumulated_total = 0
    for index, worked_date in enumerate(dates):
        days += 1
        if hours[index] in SPECIAL_VALUES:
            if not AS_CSV:
                print(" %s: Off, because %s" % (worked_date, hours[index]))
        else:
            half_day = worked_date in half_days
            # each workday has 8 hours of work, but on half days
            # it is only half of 8, aka 4.
            work_hours_for_the_day = 8 if not half_day else 4
            expected_hours_accumulated_total += work_hours_for_the_day

            # hours[index] is the actual time worked, e.g. 6:30 means
            # 6 hours and 30 minutes
            local_h, local_m = calc(hours[index])
            work_hours += local_h
            work_minutes += local_m

            actual_h = work_hours
            actual_h += work_minutes // 60  # e.g. 330 minutes = 5 hours and 30 minutes
            actual_m = work_minutes % 60

            if AS_CSV:
                print("%s;%s;" % (worked_date, hours[index]))
            else:
                print(" %s: %s\t[%s:00 vs %s:%s] %s"
                      % (worked_date, hours[index], expected_hours_accumulated_total,
                         str(actual_h).zfill(2), str(actual_m).zfill(2),
                         "Half day" if half_day else ""))

    if not AS_CSV:
        print("")
        print("First:", "<first> not found" if first is None else first[COL_DATE])
        print("Last:", "<last> not found" if last is None else last[COL_DATE])
        print("")
        print("Total time in %s: %s" % (date, total_time))
        print("")
        print("*" * 50)
%s!\" %", "test_duration = duration if len(test_duration) <= 4: test_duration = \"0%s\"", "date in it result_rows = [row for row in rows", "== '' else arg load_sheet_and_read_data(sheets, timesheet_url, date_to_use, user_full_name) print(\"Done.\") if", "+ sub_tasks[0] # main task for sub_task in sub_tasks[1:]: #", "groups: # SCAN-4167 # As a developer, I want to", "= 'half' in row[COL_WORK_FROM_HOME] if half_day: half_days[date] = time hours.append(time)", "(duration_minutes), work_type, details]) # regex: ([a-zA-Z].+-\\d+)(.*)((?<=\\[).+(?=\\])) # text: SCAN-4167 As", "minutes = 0 days = 0 for row in filtered:", "if first is None else first[COL_DATE]) print(\"Last:\", \"<last> not found\"", "Kind regards, %(user_full_name)s \"\"\".strip() % { \"date\": date, \"user_full_name\": user_full_name,", "hours = [], [] half_days = {} first = None", "% (use_date)) print(timesheet) print(\"\\n\") else: print(\"No entry found for %s\"", "worked_date in enumerate(dates): days += 1 if hours[index] in SPECIAL_VALUES:", "return arrow.get(val, \"HH:mm\") except arrow.parser.ParserError: return arrow.get(val, \"H:mm\") start =", "only half of 8, aka 4. work_hours_for_the_day = 8 if", "\"csv\": export_csv(sheets, timesheet_url, date or arrow.now().format('YYYYMM')) else: date_to_use = \"read", "len(tasks))) for task in tasks: t = task.split('\\n')[0] if '\\n'", "0 actual_h, actual_m = 0, 0 if not AS_CSV: print(\"*\"", "DEBUG = os.environ.get('DEBUG', \"0\") == \"1\" AS_CSV = os.environ.get('CSV', \"0\")", "max_cols >= COL_TIME_FIXED else None time_start = row[COL_TIME_START] if max_cols", "for the given month filtered = [row for row in", "0 tasks = [] for idx in range(COL_TASKS_START, max_cols): task", "= wfh.replace(\"Tue\", \"Tuesday\") wfh = wfh.replace(\"Wed\", \"Wednesday\") wfh = wfh.replace(\"Thu\",", "URL...\") user_full_name = os.environ.get('USER_FULL_NAME', \"\").strip() if not user_full_name: print(\"Warning: USER_FULL_NAME", "actual]\" % len(hours)) deduct_work_hours = 0 work_hours = 0 work_minutes", "%s: Off, because %s\" % (worked_date, hours[index])) else: pass else:", "date in half_days) total_hours += local_hours total_minutes += local_minutes if", "total_minutes) expected = 0 actual_h, actual_m = 0, 0 if", "<parameter>\" to enable debug output\"\"\") print(\"\") print(\"Trying to load client-secrets.json", "for date %s! 
Please fix your sheet!\" % (len(result_rows), date))", "g = r.match(t).groups() except Exception as ex: print(\"ERROR: %s -", "%s\" % len(g)) [task_number, task_details, duration] = g hours, half_hours", "w(task_number, minutes, details.strip()) print(\"\") print(\"CSV output to: %s\" % (csv_filename))", "int(work_minutes / 60) actual_m = work_minutes % 60 if AS_CSV:", "\"\", \"notes\": format_notes(notes) if notes else \"\", \"total_hours\": total_hours, \"total_minutes\":", "/ 60) actual_m = work_minutes % 60 if AS_CSV: print(\"%s;%s;\"", "int(parts[1]) if half_it: local_hours = local_hours / 2 local_minutes =", "timesheet_url, arg_date=None): try: date = arrow.get(arg_date, 'YYYYMM') except Exception: #", "COL_TIME_END else None date = row[COL_DATE] if max_cols >= COL_DATE", "for row in rows if row and str(row[COL_DATE]).startswith(date)] if filtered", "< 2 else sys.argv[1].strip() if arg == \"stats\": calc_stats(sheets, timesheet_url,", "task in tasks: t = task.split('\\n')[0] if '\\n' in task", "the output\") print(\" as CSV\") print(\"- daily: same as stats,", "if (len(check_row) > COL_WEEKDAY and check_row[COL_WEEKDAY] or \"\").lower().startswith(\"fr\"): break check_start_index", "% (sheets.title or \"???\", sheet.title or \"???\")) rows = sheet.values()", "date = row[COL_DATE] if max_cols >= COL_DATE else None if", "timesheet_url, arg_date=None): rows, date = _load_sheet_data(api, timesheet_url, arg_date) # find", "row[COL_TIME_FIXED] if max_cols >= COL_TIME_FIXED else None time_start = row[COL_TIME_START]", "r.match(t).groups() except Exception as ex: print(\"ERROR: %s - %s\" %", "if len(result_rows) != 1: print(\"More than one entry (%d) found", "for %s\" % (use_date)) print(timesheet) print(\"\\n\") else: print(\"No entry found", "+= local_hours total_minutes += local_minutes if total_minutes >= 60: total_hours", "str(actual_h).zfill(2), str(actual_m).zfill(2), \"Half day\" if half_day else \"\")) if not", "COL_TIME_START = 2 COL_TIME_END = 3 COL_LUNCH = 4 COL_TIME", "60) + (6 * half_hours) if DEBUG: print(\"time: %s, %s", "if max_cols >= COL_TIME_END else None date = row[COL_DATE] if", "work_type = \"Meeting\" if \"meeting\" in details.lower() else \"Development\" #", "None date = row[COL_DATE] if max_cols >= COL_DATE else None", "rows def load_sheet_and_read_data(api, timesheet_url, commandline, user_full_name): now = arrow.now() today", "PREVS FRIDAY.\") is_same_day = False else: # print(\"SAME DAY\") is_same_day", "half_it: local_hours = local_hours / 2 local_minutes = local_minutes /", "'half' in row[COL_WORK_FROM_HOME] if half_day: extra_info += \" - half", "parse_hours(val): try: return arrow.get(val, \"HH:mm\") except arrow.parser.ParserError: return arrow.get(val, \"H:mm\")", "add your start time.\") return None if not end_val: if", "date, \"user_full_name\": user_full_name, \"start\": start, \"end\": end, \"duration\": duration, \"wfh_info\":", "check_start_index += 1 is_same_day = None if check_start_index != found_index:", "(t)) print(\"groups: %s\" % len(g)) [task_number, task_details, task_duration] = g", "time_start)) continue print(\"%s: %d tasks found!\" % (date, len(tasks))) for", "% (t)) print(\"groups: %s\" % len(g)) [task_number, task_details, task_duration] =", "= %s (without lunch: %s)%s\" % (date, str(time_start).zfill(2), str(time_end).zfill(2), total_time_for_date,", "in range(COL_TASKS_START, max_cols): task = found_row[idx].strip() if task: t =", "= os.environ.get('CSV', \"0\") == \"1\" COL_DATE = 0 COL_WEEKDAY =", "column that has today's date in it 
result_rows = [row", "not os.path.exists(filename): raise Exception(\"Please provide a client-secret.json file, as described", "\"%d:%d hours:minutes\" % (total_hours, total_minutes) expected = 0 actual_h, actual_m", "* 60 # hours[index] is the actual time worked, e.g.", "Report] %(date)s WFH: %(wfh_info)s Hi, Daily Report for Date: %(date)s", "is None else first[COL_DATE]) print(\"Last:\", \"<last> not found\" if last", "/ 60)).zfill(2) total_minutes = str(total_time_minutes_from_tasks % 60).zfill(2) total_duration = \"%s:%s\"", "Day off because of %s\" % (date, \"whatever\" if time_start", "= re.compile(r\"([a-zA-Z].+-\\d+)(.*)((?<=\\[).+(?=\\]))\") for row in filtered: max_cols = len(row) time", "data ...\" % (sheets.title or \"???\", sheet.title or \"???\")) rows", "try: return arrow.get(val, \"HH:mm\") except arrow.parser.ParserError: return arrow.get(val, \"H:mm\") start", "os.path.exists(filename): raise Exception(\"Please provide a client-secret.json file, as described here:", "CSV columns # username|date|task|duration|work_type|details f.writerow([\"daniel\", arrow.get(str(date), 'YYYYMMDD').format('YYYY.MM.DD'), task, \"%dm\" %", "/ 60) total_minutes = total_minutes % 60 total_time = \"%d:%d", "% (duration_minutes), work_type, details]) # regex: ([a-zA-Z].+-\\d+)(.*)((?<=\\[).+(?=\\])) # text: SCAN-4167", "test_duration)) print(\"\") print(\"\") # Time: %(start)s - %(end)s (%(duration)s hours", "hours entries: %s\\t[required vs actual]\" % len(hours)) deduct_work_hours = 0", "tasks result += '\\n\\t' + sub_task result += '\\n' else:", "local_hours, local_minutes = calc(hour, date in half_days) total_hours += local_hours", "= \"\"): work_type = \"Meeting\" if \"meeting\" in details.lower() else", "\"Next week\" if is_same_day else \"This week\" wfh_info = \"\"\"%s", "sheet.values() return rows def load_sheet_and_read_data(api, timesheet_url, commandline, user_full_name): now =", "result_rows: return None if len(result_rows) != 1: print(\"More than one", "wfh != \"\" else \"all days\" # 2021-01-04 just make", "local_m actual_h = work_hours # 330 minutes = 6 hours", "it was a half day, meaning I took half a", "if not AS_CSV: print(\"*\" * 50) print(\"\") print(\"Valid hours entries:", "return None def parse_hours(val): try: return arrow.get(val, \"HH:mm\") except arrow.parser.ParserError:", "if len(sys.argv) < 2 else sys.argv[1].strip() if arg == \"stats\":", "not add up! 
Tasks vs time entered: %s != %s\"", "cache_file = get_client_secret_filenames() sheets = Sheets.from_files(secrets_file, cache_file, no_webserver=False) print(\"Success.\") date", "total_hours, \"total_minutes\": total_minutes, } print(\"Total time for all tasks (%s):", "# main task for sub_task in sub_tasks[1:]: # actual sub", "# 3 groups: # SCAN-4167 # As a developer, I", "= sheet.values() return rows def load_sheet_and_read_data(api, timesheet_url, commandline, user_full_name): now", "None def parse_hours(val): try: return arrow.get(val, \"HH:mm\") except arrow.parser.ParserError: return", "timesheet_url, arg_date): rows, date = _load_sheet_data(api, timesheet_url, arg_date) filtered =", "(wfh_extra, wfh) if wfh != \"\" else \"all days\" #", "if day_type in SPECIAL_VALUES: time = day_type hours.append(time) dates.append(date) continue", "= found_row[COL_TIME_FIXED] max_cols = len(found_row) if not start_val: if start_val", "%s != %s\" % (total_duration, test_duration)) print(\"\") print(\"\") # Time:", "COL_NOTES = 9 COL_TASKS_START = 10 SPECIAL_VALUES = [\"sick\", \"ab\",", "if len(test_duration) <= 4: test_duration = \"0%s\" % duration if", "except Exception: # pylint: disable=W0703 now = arrow.now() date =", "- 7 check_row = found_row while (check_start_index < found_index): check_row", "mentioned otherwise below\" # regex: ([a-zA-Z].+-\\d+)(.*)((?<=\\[).+(?=\\])) # text: SCAN-4167 As", "arrow.parser.ParserError: return arrow.get(val, \"H:mm\") start = parse_hours(start_val).format(\"HH:mm\") end = parse_hours(end_val).format(\"HH:mm\")", "= os.path.join(CURRENT_PATH, \"client-secrets-cache.json\") if not os.path.exists(filename): filename = os.path.expanduser(os.path.join(\"~\", \"client-secrets.json\"))", "arrow.get(commandline, 'YYYYMMDD').format('YYYYMMDD') except arrow.parser.ParserError: other_date = today use_date = other_date", "= arrow.get(str(date), 'YYYYMMDD') if the_date.weekday() in [SATURDAY, SUNDAY]: extra_info +=", "= duration if len(test_duration) <= 4: test_duration = \"0%s\" %", "vs time entered: %s != %s\" % (total_duration, test_duration)) print(\"\")", "else \"all days\" # 2021-01-04 just make this the default", "result += '\\n* ' + task return result def format_notes(notes):", "%s\" % (worked_date, hours[index])) else: pass else: half_day = worked_date", "...\") secrets_file, cache_file = get_client_secret_filenames() sheets = Sheets.from_files(secrets_file, cache_file, no_webserver=False)", "None if not end_val: if end_val in SPECIAL_VALUES: print(\"You forgot", "= task.split('\\n')[1:] tasks.append(\"%s %s\\n%s\" % (task_number.strip(), task_details[:-2].strip(), '\\n'.join(other_lines))) def format_tasks(tasks):", "!= found_index: # print(\"HA! GOT PREVS FRIDAY.\") is_same_day = False", "# If it was a half day, meaning I took", "work_hours # 330 minutes = 6 hours and 30 minutes", "entry (%d) found for date %s! 
Please fix your sheet!\"", "def w(task, duration_minutes, details = \"\"): work_type = \"Meeting\" if", "or time_end is None or date is None: continue tasks", "return rows def load_sheet_and_read_data(api, timesheet_url, commandline, user_full_name): now = arrow.now()", "else None date = row[COL_DATE] if max_cols >= COL_DATE else", "- Commute to office\" minutes_day = abs(end_hours - start_hours) *", "note in notes: result += '\\n* ' + note return", "else None if day_type is None: continue if day_type in", "print(\"Total time for all tasks (%s): %s - %s:%s\" %", "else: date_to_use = \"read today\" if arg == '' else", "task_details[:-1].strip()) w(task_number, minutes, details.strip()) print(\"\") print(\"CSV output to: %s\" %", "today = now.format('YYYYMMDD') try: other_date = arrow.get(commandline, 'YYYYMMDD').format('YYYYMMDD') except arrow.parser.ParserError:", "to format the output\") print(\" as CSV\") print(\"- daily: same", "(with 1 hour lunch: %s:%s)\" % (hours, minutes, lunch_hours, minutes))", "index, worked_date in enumerate(dates): days += 1 if hours[index] in", "timesheet_url, arg_date) # find the rows for the given month", "'' result = 'Additional Notes:\\n' for note in notes: result", "% (len(filtered), date)) minutes = 0 days = 0 for", "split_char=\".\") minutes = (hours * 60) + (6 * half_hours)", "idx in range(COL_TASKS_START, max_cols): task = found_row[idx].strip() if task: t", "= int(parts[0]) local_minutes = int(parts[1]) if half_it: local_hours = local_hours", "hours: %s:%s (with 1 hour lunch: %s:%s)\" % (hours, minutes,", "+= '\\n\\t' + sub_task result += '\\n' else: result +=", "local_minutes / 2 return local_hours, local_minutes except: if len(parts) ==", "is_same_day else \"This week\" wfh_info = \"\"\"%s %s\"\"\" % (wfh_extra,", "rows if row and str(row[COL_DATE]) == date] if result_rows is", "None time_start = row[COL_TIME_START] if max_cols >= COL_TIME_START else None", "commandline, user_full_name): now = arrow.now() today = now.format('YYYYMMDD') try: other_date", "forgot to add your start time.\") return None if not", "= 0 days = 0 expected_hours_accumulated_total = 0 for index,", "sub_tasks = task.split('\\n') if len(sub_tasks) > 1: result += '\\n*", "max_cols >= COL_MOVE else None notes = row[COL_NOTES] if max_cols", "'\\n\\t' + sub_task result += '\\n' else: result += '\\n*", "disable=W0703 now = arrow.now() date = now.format('YYYYMM') rows = load_first_sheet_rows(api,", "\",\").replace(\",\", \" and \") wfh_extra = \"Next week\" if is_same_day", "work_minutes = 0 days = 0 expected_hours_accumulated_total = 0 for", "r = re.compile(r\"([a-zA-Z].+-\\d+)(.*)((?<=\\[).+(?=\\]))\") total_time_minutes_from_tasks = 0 tasks = [] for", "if len(sub_tasks) > 1: result += '\\n* ' + sub_tasks[0]", "arg_date) # find the rows for the given month filtered", "filtered: max_cols = len(row) time = row[COL_TIME_FIXED] if max_cols >=", "to see what work from home # days were were", "2 COL_TIME_END = 3 COL_LUNCH = 4 COL_TIME = 5", "not notes[0]): return '' result = 'Additional Notes:\\n' for note", "60)).zfill(2) total_minutes = str(total_time_minutes_from_tasks % 60).zfill(2) total_duration = \"%s:%s\" %", "= arrow.get(commandline, 'YYYYMMDD').format('YYYYMMDD') except arrow.parser.ParserError: other_date = today use_date =", "task = row[idx].strip() if task: tasks.append(task) if len(tasks) == 0:", "%s:%s\" % (len(tasks), total_time_minutes_from_tasks, total_hours, total_minutes)) return msg def _load_sheet_data(api,", "user_full_name): now = arrow.now() today = 
now.format('YYYYMMDD') try: other_date =", "local_minutes = local_minutes / 2 return local_hours, local_minutes except: if", "half_day else \"\")) if not AS_CSV: print(\"\") print(\"First:\", \"<first> not", "total_time_minutes_from_tasks += minutes other_lines = task.split('\\n')[1:] tasks.append(\"%s %s\\n%s\" % (task_number.strip(),", "Exception(\"Please provide a client-secret.json file, as described here: https://github.com/xflr6/gsheets#quickstart\") return", "export_csv(api, timesheet_url, arg_date): rows, date = _load_sheet_data(api, timesheet_url, arg_date) filtered", "True wfh = u\"\" if len(check_row)-1 < COL_WORK_FROM_HOME else check_row[COL_WORK_FROM_HOME]", "\"0\") == \"1\" COL_DATE = 0 COL_WEEKDAY = 1 COL_TIME_START", "for spreadsheet URL...\") timesheet_url = os.environ.get('TIMESHEET_URL', \"\").strip() if not timesheet_url:", "forgot to add your end time.\") return None #if max_cols", "row[COL_NOTES] if max_cols >= COL_NOTES else \"\" if time_start is", "0 for row in filtered: max_cols = len(row) time =", "and \") wfh_extra = \"Next week\" if is_same_day else \"This", "%s!\" % (len(filtered), date)) minutes = 0 days = 0", "or not result_rows: return None if len(result_rows) != 1: print(\"More", "0 expected_hours_accumulated_total = 0 for index, worked_date in enumerate(dates): days", "%s\" % (total_duration, test_duration)) print(\"\") print(\"\") # Time: %(start)s -", "the_date = arrow.get(str(date), 'YYYYMMDD') if the_date.weekday() in [SATURDAY, SUNDAY]: extra_info", "str(minutes_day).zfill(2) print(\"%s: %s to %s = %s (without lunch: %s)%s\"", "= local_hours / 2 local_minutes = local_minutes / 2 return", "(len(result_rows), date)) return None found_row = result_rows[0] found_index = rows.index(found_row)", "print(\"- daily: same as stats, except ready to email to", "# print(\"No notes/tasks entered yet.\") # return None def parse_hours(val):", "date or arrow.now().format('YYYYMM')) elif arg == \"daily\": calc_daily_hours_for_month(sheets, timesheet_url, date", "except: return 0, 0 def get_client_secret_filenames(): filename = os.path.join(CURRENT_PATH, \"client-secrets.json\")", "add your end time.\") return None #if max_cols >= COL_NOTES:", "if not half_day else 4 expected_hours_accumulated_total += 8 - (8", "date = arrow.get(arg_date, 'YYYYMM') except Exception: # pylint: disable=W0703 now", "# regex: ([a-zA-Z].+-\\d+)(.*)((?<=\\[).+(?=\\])) # text: SCAN-4167 As a developer, I", "SUNDAY]: extra_info += \" - Weekend work\" half_day = 'half'", "= \"%s:%s\" % (total_hours, total_minutes) test_duration = duration if len(test_duration)", "dates[index] local_hours, local_minutes = calc(hour, date in half_days) total_hours +=", "int(parts[0]), 0 except: return 0, 0 def get_client_secret_filenames(): filename =", "%s\\t[required vs actual]\" % len(hours)) deduct_work_hours = 0 work_hours =", "else 4 expected_hours_accumulated_total += 8 - (8 - work_hours_for_the_day) expected_minutes_accumulated_total", "tasks.append(task) day_type = row[COL_TIME_START] if max_cols >= COL_TIME_START else None", "duration_val = found_row[COL_TIME_FIXED] max_cols = len(found_row) if not start_val: if", "' + sub_tasks[0] # main task for sub_task in sub_tasks[1:]:", "COL_DATE else None worked_at = row[COL_MOVE] if max_cols >= COL_MOVE", "date = now.format('YYYYMM') rows = load_first_sheet_rows(api, timesheet_url, date) date_str =", "notes_str = found_row[COL_NOTES] notes = notes_str.split('\\n') # check the previous", "\"Wednesday\") wfh = wfh.replace(\"Thu\", \"Thursday\") wfh = 
wfh.replace(\"Fri\", \"Friday\") wfh", "or date is None: continue start_hours, start_minutes = calc(time_start) end_hours,", "print(\"\") print(\"Usage: python timesheet.py [command|date] [date]\") print(\"Example: python timesheet.py stats", "arrow.get(val, \"HH:mm\") except arrow.parser.ParserError: return arrow.get(val, \"H:mm\") start = parse_hours(start_val).format(\"HH:mm\")", "time for all tasks (%s): %s - %s:%s\" % (len(tasks),", "= row[idx].strip() if task: tasks.append(task) day_type = row[COL_TIME_START] if max_cols", "total_minutes % 60 total_time = \"%d:%d hours:minutes\" % (total_hours, total_minutes)", "= now.format('YYYYMMDD') try: other_date = arrow.get(commandline, 'YYYYMMDD').format('YYYYMMDD') except arrow.parser.ParserError: other_date", "= 7 COL_WORK_FROM_HOME = 8 COL_NOTES = 9 COL_TASKS_START =", "= api.get(timesheet_url) sheet = sheets.sheets[0] print(u\"Timesheet [%s] sheet [%s] opened.", "/ 2 return local_hours, local_minutes except: if len(parts) == 1:", "= len(row) time = row[COL_TIME_FIXED] if max_cols >= COL_TIME_FIXED else", "AS_CSV: print(\"*\" * 50) print(\"\") print(\"Valid hours entries: %s\\t[required vs", "COL_TIME_FIXED else None time_start = row[COL_TIME_START] if max_cols >= COL_TIME_START", "= get_client_secret_filenames() sheets = Sheets.from_files(secrets_file, cache_file, no_webserver=False) print(\"Success.\") date =", "> COL_WEEKDAY and check_row[COL_WEEKDAY] or \"\").lower().startswith(\"fr\"): break check_start_index += 1", "half_it=False, split_char = \":\"): parts = str(hour).split(split_char) try: local_hours =", "half_hours = calc(duration.replace(\"h\", \"\"), split_char=\".\") minutes = (hours * 60)", "main task for sub_task in sub_tasks[1:]: # actual sub tasks", "stats, except ready to email to HR\") print(\"- csv: task", "\"whatever\" if time_start == 0 else time_start)) continue extra_info =", "= [], [] half_days = {} first = None last", "[row for row in rows if row and str(row[COL_DATE]).startswith(date)] if", "1 and not notes[0]): return '' result = 'Additional Notes:\\n'", "the month and time spend on each task\") print(\"\") print(\"\"\"Tip:", "len(tasks) == 0: print(\"%s: no tasks found! 
%s\" % (date,", "times, unless mentioned otherwise below\" # regex: ([a-zA-Z].+-\\d+)(.*)((?<=\\[).+(?=\\])) # text:", "= os.environ.get('TIMESHEET_URL', \"\").strip() if not timesheet_url: raise Exception(\"Please set the", "if result_rows is None or not result_rows: return None if", "= \"read today\" if len(sys.argv) < 2 else sys.argv[1].strip() if", "date or arrow.now().format('YYYYMM')) elif arg == \"csv\": export_csv(sheets, timesheet_url, date", "\"%s:%s\" % (total_hours, total_minutes) test_duration = duration if len(test_duration) <=", "notes_str.split('\\n') # check the previous Friday entry (if today is", "row else: last = row total_hours, total_minutes, total_time = 0,", "time = row[COL_TIME_FIXED] if max_cols >= COL_TIME_FIXED else None tasks", "g hours, half_hours = calc(duration.replace(\"h\", \"\"), split_char=\".\") minutes = (hours", "print(\"CSV output to: %s\" % (csv_filename)) def calc_daily_hours_for_month(api, timesheet_url, arg_date):", "if not notes or (len(notes) == 1 and not notes[0]):", "raise Exception(\"Please provide a client-secret.json file, as described here: https://github.com/xflr6/gsheets#quickstart\")", "if last is None else last[COL_DATE]) print(\"\") print(\"Total time in", "not AS_CSV: print(\"\") print(\"First:\", \"<first> not found\" if first is", "None print(\"\") print(\"Found (%d) entries for date %s!\" % (len(filtered),", "hours = str(minutes / 60).zfill(2) minutes = str(minutes % 60).zfill(2)", "time worked, e.g. 6:30 means 6 hours and 30 minutes", "None: first = row else: last = row total_hours, total_minutes,", "(date, len(tasks))) for task in tasks: t = task.split('\\n')[0] if", "print(\"groups: %s\" % len(g)) [task_number, task_details, duration] = g hours,", "found_index = rows.index(found_row) start_val = found_row[COL_TIME_START] end_val = found_row[COL_TIME_END] duration_val", "print(\"Total hours: %s:%s (with 1 hour lunch: %s:%s)\" % (hours,", "variable USER_FULL_NAME for spreadsheet URL...\") user_full_name = os.environ.get('USER_FULL_NAME', \"\").strip() if", "half_hours) if DEBUG: print(\"time: %s, %s $ %s $ %s\"", "check the previous Friday entry (if today is not Friday),", ">= COL_TIME_END else None date = row[COL_DATE] if max_cols >=", "\"ab\", \"off\", \"wfh\", \"hol\"] SATURDAY = 5 SUNDAY = 6", "first = row else: last = row total_hours, total_minutes, total_time", "%(start)s - %(end)s (%(duration)s hours total [%(total_hours)s:%(total_minutes)s]) msg = \"\"\"", "timesheet_url, date=arrow.now().format('YYYYMMDD')): print(\"Opening timesheet for %s ...\" % (date)) sheets", "wfh_info = \"\"\"%s %s\"\"\" % (wfh_extra, wfh) if wfh !=", "= load_first_sheet_rows(api, timesheet_url, date) date_str = str(date.format('YYYYMM')) return (rows, date_str)", "= \"<NAME>\" print(\"\") print(\"Usage: python timesheet.py [command|date] [date]\") print(\"Example: python", "= calc(time_start) end_hours, end_minutes = calc(time_end) if start_hours == 0:", "arg_date): rows, date = _load_sheet_data(api, timesheet_url, arg_date) filtered = [row", "the TIMESHEET_URL environment variable accordingly.\") # print(\"Checking environment variable USER_FULL_NAME", "+ (6 * half_hours) total_time_minutes_from_tasks += minutes other_lines = task.split('\\n')[1:]", "in enumerate(dates): days += 1 if hours[index] in SPECIAL_VALUES: if", "and time spend on each task\") print(\"\") print(\"\"\"Tip: use \"DEBUG=1", "= \"\"\" [Daily Report] %(date)s WFH: %(wfh_info)s Hi, Daily Report", "or not filtered: return None csv_filename = os.path.join(os.getcwd(), 
\"%s.csv\" %", "is None: continue if day_type in SPECIAL_VALUES: time = day_type", "sub tasks result += '\\n\\t' + sub_task result += '\\n'", "= r.match(t).groups() except Exception as ex: print(\"ERROR: %s - %s\"", "- start_hours) * 60 minutes_day += end_minutes - start_minutes minutes", "Tasks vs time entered: %s != %s\" % (total_duration, test_duration))", "duration, minutes)) details = \"%s %s\" % (task_number, task_details[:-1].strip()) w(task_number,", "your end time.\") return None #if max_cols >= COL_NOTES: #", "not include lunch COL_MOVE = 7 COL_WORK_FROM_HOME = 8 COL_NOTES", "is None else last[COL_DATE]) print(\"\") print(\"Total time in %s: %s\"", "filename = os.path.join(CURRENT_PATH, \"client-secrets.json\") cachefile = os.path.join(CURRENT_PATH, \"client-secrets-cache.json\") if not", "9 COL_TASKS_START = 10 SPECIAL_VALUES = [\"sick\", \"ab\", \"off\", \"wfh\",", "not AS_CSV: print(\" %s: Off, because %s\" % (worked_date, hours[index]))", "task for sub_task in sub_tasks[1:]: # actual sub tasks result", "wfh.replace(\"Mon\", \"Monday\") wfh = wfh.replace(\"Tue\", \"Tuesday\") wfh = wfh.replace(\"Wed\", \"Wednesday\")", "if weekday.startswith(\"fr\") else found_index - 7 check_row = found_row while", "else \"\")) if not AS_CSV: print(\"\") print(\"First:\", \"<first> not found\"", "try: other_date = arrow.get(commandline, 'YYYYMMDD').format('YYYYMMDD') except arrow.parser.ParserError: other_date = today", "# # from __future__ import print_function import csv import os", "timesheet_url, date or arrow.now().format('YYYYMM')) elif arg == \"csv\": export_csv(sheets, timesheet_url,", "date, user_full_name): # find the row with the first column", "regards, %(user_full_name)s \"\"\".strip() % { \"date\": date, \"user_full_name\": user_full_name, \"start\":", "hours[index])) else: print(\" %s: %s\\t[%s:00 vs %s:%s] %s\" % (worked_date,", "COL_NOTES else \"\" if time_start is None or time_end is", "actual_h += int(work_minutes / 60) actual_m = work_minutes % 60", "time = row[COL_TIME_FIXED] if max_cols >= COL_TIME_FIXED else None time_start", "(sheets.title or \"???\", sheet.title or \"???\")) rows = sheet.values() return", "COL_TIME_FIXED else None tasks = [] for idx in range(COL_TASKS_START,", "python timesheet.py 20201130\") print(\"\") print(\"Available commands:\") print(\"- stats: show summed", "timesheet.py stats 202011\") print(\"Example: python timesheet.py 20201130\") print(\"\") print(\"Available commands:\")", "len(found_row) if not start_val: if start_val in SPECIAL_VALUES: print(\"You forgot", "print(\"No notes/tasks entered yet.\") # return None def parse_hours(val): try:", "end time.\") return None #if max_cols >= COL_NOTES: # print(\"No", "\"details\"]) def w(task, duration_minutes, details = \"\"): work_type = \"Meeting\"", "if not AS_CSV: print(\" %s: Off, because %s\" % (worked_date,", "range(COL_TASKS_START, max_cols): task = found_row[idx].strip() if task: t = task.split('\\n')[0]", "Hi, Daily Report for Date: %(date)s %(tasks)s %(notes)s Kind regards,", "6 hours and 30 minutes local_h, local_m = calc(hours[index]) work_hours", "TIMESHEET_URL for spreadsheet URL...\") timesheet_url = os.environ.get('TIMESHEET_URL', \"\").strip() if not", "commands:\") print(\"- stats: show summed up hours and minutes for", "row total_hours, total_minutes, total_time = 0, 0, \"\" for index,", "load_first_sheet_rows(api, timesheet_url, use_date) timesheet = get_timesheet_for_date(rows, use_date, user_full_name) if timesheet:", "half_day = 'half' in row[COL_WORK_FROM_HOME] if 
half_day: extra_info += \"", "to update AIScanRobo every week [ # 1h r =", "the actual time worked, e.g. 6:30 means 6 hours and", "TIMESHEET_URL environment variable accordingly.\") # print(\"Checking environment variable USER_FULL_NAME for", "return '' result = 'Additional Notes:\\n' for note in notes:", "wfh = wfh.replace(\"Mon\", \"Monday\") wfh = wfh.replace(\"Tue\", \"Tuesday\") wfh =", "= os.path.expanduser(os.path.join(\"~\", \"client-secrets-cache.json\")) if not os.path.exists(filename): raise Exception(\"Please provide a", "return None if not AS_CSV: print(\"\") print(\"Found (%d) entries for", "\"daily\": calc_daily_hours_for_month(sheets, timesheet_url, date or arrow.now().format('YYYYMM')) elif arg == \"csv\":", "if half_day else \"\")) if not AS_CSV: print(\"\") print(\"First:\", \"<first>", "= dates[index] local_hours, local_minutes = calc(hour, date in half_days) total_hours", "if max_cols >= COL_TIME_FIXED else None tasks = [] for", "if total_minutes >= 60: total_hours += (total_minutes / 60) total_minutes", "[], [] half_days = {} first = None last =", "= (hours * 60) + (6 * half_hours) if DEBUG:", "half_day else 4 expected_hours_accumulated_total += 8 - (8 - work_hours_for_the_day)", "% (date, time_start)) continue print(\"%s: %d tasks found!\" % (date,", "== \"csv\": export_csv(sheets, timesheet_url, date or arrow.now().format('YYYYMM')) else: date_to_use =", "= 0 work_minutes = 0 days = 0 expected_hours_accumulated_total =", "- (8 - work_hours_for_the_day) expected_minutes_accumulated_total = expected_hours_accumulated_total * 60 #", "time spend on each task\") print(\"\") print(\"\"\"Tip: use \"DEBUG=1 timesheet", "print(\"Found (%d) entries for date %s!\" % (len(filtered), date)) print(\"Writing", "print(\"task: %s\" % (t)) print(\"groups: %s\" % len(g)) [task_number, task_details,", "end, \"duration\": duration, \"wfh_info\": wfh_info, \"tasks\": format_tasks(tasks) if tasks else", "max_cols >= COL_DATE else None if day_type is None: continue", "for the given/current month\") print(\" use \\\"CSV=1 python timesheet.py stats\\\"", "[%s] sheet [%s] opened. Accessing cell data ...\" % (sheets.title", "def calc_daily_hours_for_month(api, timesheet_url, arg_date): rows, date = _load_sheet_data(api, timesheet_url, arg_date)", "max_cols = len(row) time = row[COL_TIME_FIXED] if max_cols >= COL_TIME_FIXED", "def load_sheet_and_read_data(api, timesheet_url, commandline, user_full_name): now = arrow.now() today =", "timesheet_url, date or arrow.now().format('YYYYMM')) else: date_to_use = \"read today\" if", "csv_filename = os.path.join(os.getcwd(), \"%s.csv\" % (arg_date)) print(\"\") print(\"Found (%d) entries", "print(\"Example: python timesheet.py 20201130\") print(\"\") print(\"Available commands:\") print(\"- stats: show", "in notes: result += '\\n* ' + note return result", "str(minutes / 60).zfill(2) minutes = str(minutes % 60).zfill(2) lunch_hours =", "if task: t = task.split('\\n')[0] if '\\n' in task else", "return None if len(result_rows) != 1: print(\"More than one entry", "in task else task try: g = r.match(t).groups() except Exception", "start_hours == 0: print(\"%s: Day off because of %s\" %", "= arrow.now() today = now.format('YYYYMMDD') try: other_date = arrow.get(commandline, 'YYYYMMDD').format('YYYYMMDD')", "% 60 total_time_for_date = str(hours_day).zfill(2) + ':' + str(minutes_day).zfill(2) days", "tasks.append(task) if len(tasks) == 0: print(\"%s: no tasks found! 
%s\"", "% (task_number, task_details[:-1].strip()) w(task_number, minutes, details.strip()) print(\"\") print(\"CSV output to:", "not Friday), to see what work from home # days", ">= COL_TIME_FIXED else None tasks = [] for idx in", "str(hour).split(split_char) try: local_hours = int(parts[0]) local_minutes = int(parts[1]) if half_it:", "= 'Tasks:\\n' for task in tasks: if '\\n' in task:", "arrow.get(str(date), 'YYYYMMDD').format('YYYY.MM.DD'), task, \"%dm\" % (duration_minutes), work_type, details]) # regex:", "None: continue tasks = [] for idx in range(COL_TASKS_START, max_cols):", "set the TIMESHEET_URL environment variable accordingly.\") # print(\"Checking environment variable", ">= COL_NOTES: # print(\"No notes/tasks entered yet.\") # return None", "None: continue if day_type in SPECIAL_VALUES: time = day_type hours.append(time)", "arrow.now().format('YYYYMM')) elif arg == \"daily\": calc_daily_hours_for_month(sheets, timesheet_url, date or arrow.now().format('YYYYMM'))", "= u\"\" if len(check_row)-1 < COL_WORK_FROM_HOME else check_row[COL_WORK_FROM_HOME] wfh =", "= None if len(sys.argv) < 3 else sys.argv[2].strip() arg =", "% (date, str(time_start).zfill(2), str(time_end).zfill(2), total_time_for_date, no_lunch, extra_info)) hours = str(minutes", "+ note return result total_hours = str(int(total_time_minutes_from_tasks / 60)).zfill(2) total_minutes", "user_full_name) if timesheet: print(\"\\n\\n\") print(\"Timesheet for %s\" % (use_date)) print(timesheet)", "def export_csv(api, timesheet_url, arg_date): rows, date = _load_sheet_data(api, timesheet_url, arg_date)", "start_hours) * 60 minutes_day += end_minutes - start_minutes minutes +=", "60 if AS_CSV: print(\"%s;%s;\" % (worked_date, hours[index])) else: print(\" %s:", "If it was a half day, meaning I took half", "= os.path.expanduser(os.path.join(\"~\", \"client-secrets.json\")) cachefile = os.path.expanduser(os.path.join(\"~\", \"client-secrets-cache.json\")) if not os.path.exists(filename):", "print(\"\") print(\"Trying to load client-secrets.json file ...\") secrets_file, cache_file =", "# print(\"HA! GOT PREVS FRIDAY.\") is_same_day = False else: #", "wfh_extra = \"Next week\" if is_same_day else \"This week\" wfh_info", "None if day_type is None: continue if day_type in SPECIAL_VALUES:", "tasks = [] for idx in range(COL_TASKS_START, max_cols): task =", "tasks (%s): %s - %s:%s\" % (len(tasks), total_time_minutes_from_tasks, total_hours, total_minutes))", "minutes_day hours_day = int(minutes_day / 60) hours_day_without_lunch = hours_day -", "half_hours = calc(task_duration.replace(\"h\", \"\"), split_char=\".\") minutes = (hours * 60)", "days).zfill(2) print(\"\") print(\"Total days worked: %s\" % str(days)) print(\"Total hours:", "times do not add up! Tasks vs time entered: %s", "task: tasks.append(task) if len(tasks) == 0: print(\"%s: no tasks found!", ">= COL_TIME_START else None date = row[COL_DATE] if max_cols >=", "timesheet_url, commandline, user_full_name): now = arrow.now() today = now.format('YYYYMMDD') try:", "arrow.get(arg_date, 'YYYYMM') except Exception: # pylint: disable=W0703 now = arrow.now()", "up! 
Tasks vs time entered: %s != %s\" % (total_duration,", "end_minutes - start_minutes minutes += minutes_day hours_day = int(minutes_day /", "[command|date] [date]\") print(\"Example: python timesheet.py stats 202011\") print(\"Example: python timesheet.py", "%(tasks)s %(notes)s Kind regards, %(user_full_name)s \"\"\".strip() % { \"date\": date,", "import os import re import sys import arrow from gsheets", "1 hour lunch: %s:%s)\" % (hours, minutes, lunch_hours, minutes)) print(\"\")", "for all tasks (%s): %s - %s:%s\" % (len(tasks), total_time_minutes_from_tasks,", "len(result_rows) != 1: print(\"More than one entry (%d) found for", "+= 1 is_same_day = None if check_start_index != found_index: #", "def calc_stats(api, timesheet_url, arg_date=None): rows, date = _load_sheet_data(api, timesheet_url, arg_date)", "this the default for now wfh_info = \"at all times,", "(task_number.strip(), task_details[:-2].strip(), '\\n'.join(other_lines))) def format_tasks(tasks): if not tasks: return ''", "calc_stats(sheets, timesheet_url, date or arrow.now().format('YYYYMM')) elif arg == \"daily\": calc_daily_hours_for_month(sheets,", "SPECIAL_VALUES: print(\"You forgot to add your end time.\") return None", "sheet!\" % (len(result_rows), date)) return None found_row = result_rows[0] found_index", "found for date %s! Please fix your sheet!\" % (len(result_rows),", "330 minutes = 6 hours and 30 minutes actual_h +=", "arrow.now().format('YYYYMM')) else: date_to_use = \"read today\" if arg == ''", "else None tasks = [] for idx in range(COL_TASKS_START, max_cols):", "I want to update AIScanRobo every week [ # 1h", "previous Friday entry (if today is not Friday), to see", "return result def format_notes(notes): if not notes or (len(notes) ==", "hours and 30 minutes actual_h += int(work_minutes / 60) actual_m", "start_minutes minutes += minutes_day hours_day = int(minutes_day / 60) hours_day_without_lunch", "str(date.format('YYYYMM')) return (rows, date_str) def export_csv(api, timesheet_url, arg_date): rows, date", "COL_LUNCH = 4 COL_TIME = 5 # includes lunch COL_TIME_FIXED", "\":\"): parts = str(hour).split(split_char) try: local_hours = int(parts[0]) local_minutes =", "+= (total_minutes / 60) total_minutes = total_minutes % 60 total_time", "else: half_day = worked_date in half_days # each workday has", "\"Thursday\") wfh = wfh.replace(\"Fri\", \"Friday\") wfh = wfh.replace(\", \", \",\").replace(\",\",", "found for %s\" % use_date) def get_timesheet_for_date(rows, date, user_full_name): #", "task breakdown for the month and time spend on each", "wfh) if wfh != \"\" else \"all days\" # 2021-01-04", "aka 4. work_hours_for_the_day = 8 if not half_day else 4", "date)) return None found_row = result_rows[0] found_index = rows.index(found_row) start_val", "# username|date|task|duration|work_type|details f.writerow([\"daniel\", arrow.get(str(date), 'YYYYMMDD').format('YYYY.MM.DD'), task, \"%dm\" % (duration_minutes), work_type,", "+ str(minutes_day).zfill(2) print(\"%s: %s to %s = %s (without lunch:", "str(days)) print(\"Total hours: %s:%s (with 1 hour lunch: %s:%s)\" %", "None else first[COL_DATE]) print(\"Last:\", \"<last> not found\" if last is", "8, aka 4. 
work_hours_for_the_day = 8 if not half_day else", "4 COL_TIME = 5 # includes lunch COL_TIME_FIXED = 6", "= 3 COL_LUNCH = 4 COL_TIME = 5 # includes", "on half days it is only half of 8, aka", "local_minutes except: if len(parts) == 1: try: return int(parts[0]), 0", "\"Development\" # Needed CSV columns # username|date|task|duration|work_type|details f.writerow([\"daniel\", arrow.get(str(date), 'YYYYMMDD').format('YYYY.MM.DD'),", "does not include lunch COL_MOVE = 7 COL_WORK_FROM_HOME = 8", "'YYYYMMDD').format('YYYY.MM.DD'), task, \"%dm\" % (duration_minutes), work_type, details]) # regex: ([a-zA-Z].+-\\d+)(.*)((?<=\\[).+(?=\\]))", "== 1: try: return int(parts[0]), 0 except: return 0, 0", "weekday = (found_row[COL_WEEKDAY] or \"\").lower() check_start_index = found_index if weekday.startswith(\"fr\")", "is_same_day = True wfh = u\"\" if len(check_row)-1 < COL_WORK_FROM_HOME", "= int(parts[1]) if half_it: local_hours = local_hours / 2 local_minutes", "([a-zA-Z].+-\\d+)(.*)((?<=\\[).+(?=\\])) # text: SCAN-4167 As a developer, I want to", "row[COL_TIME_START] if max_cols >= COL_TIME_START else None time_end = row[COL_TIME_END]", "if is_same_day else \"This week\" wfh_info = \"\"\"%s %s\"\"\" %", "duration_minutes, details = \"\"): work_type = \"Meeting\" if \"meeting\" in", "\"meeting\" in details.lower() else \"Development\" # Needed CSV columns #", "show summed up hours and minutes for the given/current month\")", "sub_tasks[0] # main task for sub_task in sub_tasks[1:]: # actual", "time = day_type hours.append(time) dates.append(date) continue elif not tasks: continue", "task try: g = r.match(t).groups() except Exception as ex: print(\"ERROR:", "in %s: %s\" % (date, total_time)) print(\"\") print(\"*\" * 50)", "result += '\\n' else: result += '\\n* ' + task", "= os.path.join(CURRENT_PATH, \"client-secrets.json\") cachefile = os.path.join(CURRENT_PATH, \"client-secrets-cache.json\") if not os.path.exists(filename):", "check_row = rows[check_start_index] if (len(check_row) > COL_WEEKDAY and check_row[COL_WEEKDAY] or", "format the output\") print(\" as CSV\") print(\"- daily: same as", "row in rows if row and str(row[COL_DATE]) == date] if", "= calc(hour, date in half_days) total_hours += local_hours total_minutes +=", "wfh_info = \"at all times, unless mentioned otherwise below\" #", "time_end = row[COL_TIME_END] if max_cols >= COL_TIME_END else None date", "work_hours = 0 work_minutes = 0 days = 0 expected_hours_accumulated_total", "week [ # 1h r = re.compile(r\"([a-zA-Z].+-\\d+)(.*)((?<=\\[).+(?=\\]))\") for row in", "result += '\\n* ' + sub_tasks[0] # main task for" ]
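# --- example: task-cell parsing ---
# A small self-contained sketch of how the task regex used above splits a
# cell; the sample line is the one from the script's own comments. Note that
# group 2 keeps the trailing "[" (which is why the callers strip it with
# task_details[:-2] / task_details[:-1]), and that a "[1h]" duration maps to
# 60 minutes while "[0.5h]" maps to 30, since each tenth of an hour counts
# as 6 minutes in the `minutes = (hours * 60) + (6 * half_hours)` formula.
import re

task_re = re.compile(r"([a-zA-Z].+-\d+)(.*)((?<=\[).+(?=\]))")
sample = "SCAN-4167 As a developer, I want to update AIScanRobo every week [1h]"

task_number, task_details, task_duration = task_re.match(sample).groups()
print(task_number)    # SCAN-4167
print(task_details)   # " As a developer, I want to update AIScanRobo every week ["
print(task_duration)  # 1h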
[ "constants.FOLD # People who have gone all-in continue to be", "max_bets[calls]) # Reset the bets and countdown current_bets[calls, player_idx] =", "self.N_PLAYERS-1] = 0 if np.max(round_countdown[running_games]) <= 0: return current_bets, (round,", "self.BIG_BLIND = BIG_BLIND self.logger = logger self.N_PLAYERS = 6 def", "1 lower than the lowest score a hand can have", "community_cards, hole_cards, contenders): evaluator = treys.Evaluator() # 7463 = 1", ":param prev_round_investment: np.ndarray(batchsize, n_players) = int :param folded: np.ndarray(batchsize, n_players)", "continue to be all-in actions[prev_round_investment[:, player_idx] + current_bets[:, player_idx] ==", "evaluate_hands(self, community_cards, hole_cards, contenders): evaluator = treys.Evaluator() # 7463 =", "total_winnings += participants * gains[:, None] total_winnings -= prev_round_investment self.logger.log(constants.EV_END_GAME,", "for p in players], folded, hole_cards)) self.logger.save_to_file() for player_idx, player", ":3]) prev_round_investment += bets # Turn bets, _ = self.run_round(players,", "= np.zeros(self.BATCH_SIZE, dtype=int) round_countdown[:] = self.N_PLAYERS if round == constants.PRE_FLOP:", "actions == constants.CALL))[0] if calls.size > 0: investment = np.minimum(self.INITIAL_CAPITAL", "# print(\"True raises\", raises, amounts[raises]) investment = np.maximum(current_bets[raises, player_idx] +", "None] # Get the number of times each pot will", "self.BATCH_SIZE = BATCH_SIZE self.INITIAL_CAPITAL = INITIAL_CAPITAL self.SMALL_BLIND = SMALL_BLIND self.BIG_BLIND", "running_games = np.nonzero(round_countdown > 0)[0] for player_idx, player in player_order:", "pots will be split participants = hand_scores == sorted_hands[:, 0][:,", "prev_round_investment += bets # Flop bets, _ = self.run_round(players, prev_round_investment,", "not broken, is there another reason? 
round_countdown[folded.sum(axis=1) == self.N_PLAYERS-1] =", "player_idx, :], community_cards, total_winnings[:, player_idx]) return total_winnings def run_round(self, players,", "self.logger = logger self.N_PLAYERS = 6 def generate_cards(self): cards =", "player in player_order: actions, amounts = player.act(player_idx, round, round_countdown >", "current_bets[:, 0] = self.SMALL_BLIND current_bets[:, 1] = self.BIG_BLIND max_bets[:] =", "if all folded stops game, improves performance but breaks tests", "everyone who has the best hand and among which pots", ":param hole_cards: np.ndarray(batchsize, n_players, 2) = treys.Card :param community_cards: np.ndarray(batchsize,", "total_winnings, [str(p) for p in players], folded, hole_cards)) self.logger.save_to_file() for", "round_countdown[raises] = self.N_PLAYERS last_raiser[raises] = player_idx ########### # FOLDING #", "self.logger.save_to_file() for player_idx, player in enumerate(players): round, current_bets, min_raise, prev_round_investment,", "= treys.Evaluator() # 7463 = 1 lower than the lowest", "BATCH_SIZE self.INITIAL_CAPITAL = INITIAL_CAPITAL self.SMALL_BLIND = SMALL_BLIND self.BIG_BLIND = BIG_BLIND", "= np.zeros(self.BATCH_SIZE, dtype=int) player_order = list(enumerate(players)) round_countdown = np.zeros(self.BATCH_SIZE, dtype=int)", "- max_bets[raises] max_bets[raises] = investment # Reset the bets and", "all-in continue to be all-in actions[prev_round_investment[:, player_idx] + current_bets[:, player_idx]", "np.maximum(current_bets[raises, player_idx] + amounts[raises], max_bets[raises] + min_raise[raises]) min_raise[raises] = investment", "self.evaluate_hands(community_cards, hole_cards, np.logical_not(folded)) ranks = np.argsort(hand_scores, axis=1) sorted_hands = np.take_along_axis(hand_scores,", "# FOLDING # ########### folded[np.where(np.logical_and(round_countdown > 0, actions == constants.FOLD))[0],", "necessary because it bloats the log size (by ~500 kB", "another reason? 
round_countdown[folded.sum(axis=1) == self.N_PLAYERS-1] = 0 if np.max(round_countdown[running_games]) <=", "np.zeros(self.BATCH_SIZE, dtype=int) min_raise[:] = self.BIG_BLIND last_raiser = np.zeros(self.BATCH_SIZE, dtype=int) player_order", "results = np.full((self.BATCH_SIZE, self.N_PLAYERS), 7463, dtype=int) for game_idx,community in enumerate(community_cards):", "> 0: investment = np.minimum(self.INITIAL_CAPITAL - prev_round_investment[calls, player_idx], max_bets[calls]) #", "SMALL_BLIND, BIG_BLIND, logger): self.BATCH_SIZE = BATCH_SIZE self.INITIAL_CAPITAL = INITIAL_CAPITAL self.SMALL_BLIND", "+= bets # Showdown pool = np.sum(prev_round_investment, axis=1) total_winnings =", "+ amounts[raises], max_bets[raises] + min_raise[raises]) min_raise[raises] = investment - max_bets[raises]", "_ = self.run_round(players, prev_round_investment, folded, constants.TURN, hole_cards, community_cards[:, :4]) prev_round_investment", "# self.logger.log(constants.EV_PLAYER_ACTION, (round, player_idx, actions, amounts, round_countdown, folded[:, player_idx])) #", "prev_round_investment self.logger.log(constants.EV_END_GAME, (hand_scores, total_winnings, [str(p) for p in players], folded,", "dtype=int) min_raise[:] = self.BIG_BLIND last_raiser = np.zeros(self.BATCH_SIZE, dtype=int) player_order =", "prev_round_investment, folded, constants.FLOP, hole_cards, community_cards[:, :3]) prev_round_investment += bets #", "INITIAL_CAPITAL, SMALL_BLIND, BIG_BLIND, logger): self.BATCH_SIZE = BATCH_SIZE self.INITIAL_CAPITAL = INITIAL_CAPITAL", "if raises.size > 0: # print(\"True raises\", raises, amounts[raises]) investment", "1] = constants.FOLD # People who have gone all-in continue", "raises, amounts[raises]) investment = np.maximum(current_bets[raises, player_idx] + amounts[raises], max_bets[raises] +", "!= self.N_PLAYERS: raise ValueError('Only {} players allowed'.format(self.N_PLAYERS)) community_cards, hole_cards =", "# Disabled when not necessary because it bloats the log", "{0..3} :param hole_cards: np.ndarray(batchsize, n_players, 2) = treys.Card :param community_cards:", "game, improves performance but breaks tests # test is not", "n_splits_per_game total_winnings += participants * gains[:, None] total_winnings -= prev_round_investment", "of times each pot will be split n_splits_per_game = participants.sum(axis=1)", "logger self.N_PLAYERS = 6 def generate_cards(self): cards = np.tile(np.arange(52), (self.BATCH_SIZE,", "= player_idx ########### # FOLDING # ########### folded[np.where(np.logical_and(round_countdown > 0,", "but breaks tests # test is not broken, is there", ":param community_cards: np.ndarray(batchsize, n_players, {0,3,4,5}) = treys.Card :return: current_bets: np.ndarray(batchsize,", "min_raise[:] = self.BIG_BLIND last_raiser = np.zeros(self.BATCH_SIZE, dtype=int) player_order = list(enumerate(players))", "continue to fold actions[folded[:, player_idx] == 1] = constants.FOLD #", "last_raiser) def evaluate_hands(self, community_cards, hole_cards, contenders): evaluator = treys.Evaluator() #", "def evaluate_hands(self, community_cards, hole_cards, contenders): evaluator = treys.Evaluator() # 7463", "community_cards = cards[:, :5] hole_cards = np.reshape(cards[:, 5:5 + 2", "hole_cards: np.ndarray(batchsize, n_players, 2) = treys.Card :param community_cards: np.ndarray(batchsize, n_players,", "= participants.sum(axis=1) # Split and distribute the money gains =", "# Get everyone who has the best hand and among", "<= 0: return current_bets, (round, current_bets, min_raise, prev_round_investment, 
folded, last_raiser)", "~500 kB or so, which triples the size) # self.logger.log(constants.EV_PLAYER_ACTION,", ":param round: int ∈ {0..3} :param hole_cards: np.ndarray(batchsize, n_players, 2)", ":return: current_bets: np.ndarray(batchsize, n_players)=int {0-200} \"\"\" current_bets = np.zeros((self.BATCH_SIZE, self.N_PLAYERS),", "len(players) != self.N_PLAYERS: raise ValueError('Only {} players allowed'.format(self.N_PLAYERS)) community_cards, hole_cards", "player.initialize(self.BATCH_SIZE, self.INITIAL_CAPITAL, self.N_PLAYERS) # Pre-flop bets, _ = self.run_round(players, prev_round_investment,", "+= bets # River bets, end_state = self.run_round(players, prev_round_investment, folded,", "n_players, 2) = treys.Card :param community_cards: np.ndarray(batchsize, n_players, {0,3,4,5}) =", "community_cards[:, :0]) prev_round_investment += bets # Flop bets, _ =", "run_game(self, players): if len(players) != self.N_PLAYERS: raise ValueError('Only {} players", "Pre-flop bets, _ = self.run_round(players, prev_round_investment, folded, constants.PRE_FLOP, hole_cards, community_cards[:,", "self.run_round(players, prev_round_investment, folded, constants.FLOP, hole_cards, community_cards[:, :3]) prev_round_investment += bets", "= hand_scores == sorted_hands[:, 0][:, None] # Get the number", "hole_cards, community_cards[:, :0]) prev_round_investment += bets # Flop bets, _", "investment = np.minimum(self.INITIAL_CAPITAL - prev_round_investment[calls, player_idx], max_bets[calls]) # Reset the", "participants.sum(axis=1) # Split and distribute the money gains = pool", "########### # FOLDING # ########### folded[np.where(np.logical_and(round_countdown > 0, actions ==", "can have (scores are descending to 1) results = np.full((self.BATCH_SIZE,", "investment ########### # RAISING # ########### raises = np.where(np.logical_and(round_countdown >", "__init__(self, BATCH_SIZE, INITIAL_CAPITAL, SMALL_BLIND, BIG_BLIND, logger): self.BATCH_SIZE = BATCH_SIZE self.INITIAL_CAPITAL", "distribute the money gains = pool / n_splits_per_game total_winnings +=", ":] = FULL_DECK[np.random.permutation(cards[i, :])] community_cards = cards[:, :5] hole_cards =", "ranks = np.argsort(hand_scores, axis=1) sorted_hands = np.take_along_axis(hand_scores, indices=ranks, axis=1) #", "current_bets: np.ndarray(batchsize, n_players)=int {0-200} \"\"\" current_bets = np.zeros((self.BATCH_SIZE, self.N_PLAYERS), dtype=int)", "= self.BIG_BLIND last_raiser = np.zeros(self.BATCH_SIZE, dtype=int) player_order = list(enumerate(players)) round_countdown", "amounts = player.act(player_idx, round, round_countdown > 0, current_bets, min_raise, prev_round_investment,", "community_cards, hole_cards = self.generate_cards() folded = np.zeros((self.BATCH_SIZE, len(players)), dtype=bool) prev_round_investment", "round, current_bets, min_raise, prev_round_investment, folded, last_raiser, hole_cards[:, player_idx, :], community_cards,", "player_idx] == self.INITIAL_CAPITAL] = constants.CALL ########### # CALLING # ###########", "constants.TURN, hole_cards, community_cards[:, :4]) prev_round_investment += bets # River bets,", "contenders): evaluator = treys.Evaluator() # 7463 = 1 lower than", "player_idx, actions, amounts, round_countdown, folded[:, player_idx])) # People who have", "min_raise[raises]) min_raise[raises] = investment - max_bets[raises] max_bets[raises] = investment #", "constants.FOLD))[0], player_idx] = 1 round_countdown[running_games] -= 1 #TODO: if all", "players], folded, hole_cards)) self.logger.save_to_file() for player_idx, player in 
enumerate(players): round,", "0)[0] for player_idx, player in player_order: actions, amounts = player.act(player_idx,", "len(players)), dtype=int) for player in players: player.initialize(self.BATCH_SIZE, self.INITIAL_CAPITAL, self.N_PLAYERS) #", "for i in range(self.BATCH_SIZE): cards[i, :] = FULL_DECK[np.random.permutation(cards[i, :])] community_cards", "be split n_splits_per_game = participants.sum(axis=1) # Split and distribute the", "if round == constants.PRE_FLOP: current_bets[:, 0] = self.SMALL_BLIND current_bets[:, 1]", "prev_round_investment += bets # River bets, end_state = self.run_round(players, prev_round_investment,", "participants * gains[:, None] total_winnings -= prev_round_investment self.logger.log(constants.EV_END_GAME, (hand_scores, total_winnings,", "hole_cards def run_game(self, players): if len(players) != self.N_PLAYERS: raise ValueError('Only", "improves performance but breaks tests # test is not broken,", "for game_idx,community in enumerate(community_cards): for player_idx,hole in enumerate(hole_cards[game_idx]): if contenders[game_idx,", "if calls.size > 0: investment = np.minimum(self.INITIAL_CAPITAL - prev_round_investment[calls, player_idx],", "total_winnings -= prev_round_investment self.logger.log(constants.EV_END_GAME, (hand_scores, total_winnings, [str(p) for p in", "= BATCH_SIZE self.INITIAL_CAPITAL = INITIAL_CAPITAL self.SMALL_BLIND = SMALL_BLIND self.BIG_BLIND =", "players: [Player] :param prev_round_investment: np.ndarray(batchsize, n_players) = int :param folded:", "= np.maximum(current_bets[raises, player_idx] + amounts[raises], max_bets[raises] + min_raise[raises]) min_raise[raises] =", "self.generate_cards() folded = np.zeros((self.BATCH_SIZE, len(players)), dtype=bool) prev_round_investment = np.zeros((self.BATCH_SIZE, len(players)),", "(self.BATCH_SIZE, 1)) for i in range(self.BATCH_SIZE): cards[i, :] = FULL_DECK[np.random.permutation(cards[i,", "folded, constants.RIVER, hole_cards, community_cards) prev_round_investment += bets # Showdown pool", "prev_round_investment += bets # Turn bets, _ = self.run_round(players, prev_round_investment,", "bets, _ = self.run_round(players, prev_round_investment, folded, constants.PRE_FLOP, hole_cards, community_cards[:, :0])", "self.logger.log(constants.EV_PLAYER_ACTION, (round, player_idx, actions, amounts, round_countdown, folded[:, player_idx])) # People", "np import pickle import treys import constants FULL_DECK = np.array(treys.Deck.GetFullDeck())", "hole_cards[:, player_idx, :], community_cards) # Disabled when not necessary because", "Disabled when not necessary because it bloats the log size", "lowest score a hand can have (scores are descending to", "0, actions == constants.FOLD))[0], player_idx] = 1 round_countdown[running_games] -= 1", "None] total_winnings -= prev_round_investment self.logger.log(constants.EV_END_GAME, (hand_scores, total_winnings, [str(p) for p", "= np.zeros((self.BATCH_SIZE, self.N_PLAYERS), dtype=int) max_bets = np.zeros(self.BATCH_SIZE, dtype=int) min_raise =", "constants.PRE_FLOP: current_bets[:, 0] = self.SMALL_BLIND current_bets[:, 1] = self.BIG_BLIND max_bets[:]", "max_bets[:] = self.BIG_BLIND player_order = player_order[2:] + player_order[:2] while True:", "end_state = self.run_round(players, prev_round_investment, folded, constants.RIVER, hole_cards, community_cards) prev_round_investment +=", "+= bets # Flop bets, _ = self.run_round(players, prev_round_investment, folded,", "round, round_countdown > 0, current_bets, min_raise, prev_round_investment, folded, 
last_raiser, hole_cards[:,", "already folded continue to fold actions[folded[:, player_idx] == 1] =", "current_bets, min_raise, prev_round_investment, folded, last_raiser = end_state player.end_trajectory(player_idx, round, current_bets,", "for player_idx, player in player_order: actions, amounts = player.act(player_idx, round,", "= treys.Card :return: current_bets: np.ndarray(batchsize, n_players)=int {0-200} \"\"\" current_bets =", "BATCH_SIZE, INITIAL_CAPITAL, SMALL_BLIND, BIG_BLIND, logger): self.BATCH_SIZE = BATCH_SIZE self.INITIAL_CAPITAL =", "hand_scores = self.evaluate_hands(community_cards, hole_cards, np.logical_not(folded)) ranks = np.argsort(hand_scores, axis=1) sorted_hands", "end_state player.end_trajectory(player_idx, round, current_bets, min_raise, prev_round_investment, folded, last_raiser, hole_cards[:, player_idx,", "pot will be split n_splits_per_game = participants.sum(axis=1) # Split and", "round_countdown, folded[:, player_idx])) # People who have already folded continue", "== self.N_PLAYERS-1] = 0 if np.max(round_countdown[running_games]) <= 0: return current_bets,", "community_cards: np.ndarray(batchsize, n_players, {0,3,4,5}) = treys.Card :return: current_bets: np.ndarray(batchsize, n_players)=int", "self.run_round(players, prev_round_investment, folded, constants.TURN, hole_cards, community_cards[:, :4]) prev_round_investment += bets", "have (scores are descending to 1) results = np.full((self.BATCH_SIZE, self.N_PLAYERS),", "dtype=int) max_bets = np.zeros(self.BATCH_SIZE, dtype=int) min_raise = np.zeros(self.BATCH_SIZE, dtype=int) min_raise[:]", "= self.run_round(players, prev_round_investment, folded, constants.RIVER, hole_cards, community_cards) prev_round_investment += bets", "axis=1) # Get everyone who has the best hand and", "= investment ########### # RAISING # ########### raises = np.where(np.logical_and(round_countdown", "self.N_PLAYERS) # Pre-flop bets, _ = self.run_round(players, prev_round_investment, folded, constants.PRE_FLOP,", ":param players: [Player] :param prev_round_investment: np.ndarray(batchsize, n_players) = int :param", "who have already folded continue to fold actions[folded[:, player_idx] ==", "if contenders[game_idx, player_idx]: results[game_idx, player_idx] = evaluator.evaluate(community.tolist(), hole.tolist()) return results", "self.N_PLAYERS = 6 def generate_cards(self): cards = np.tile(np.arange(52), (self.BATCH_SIZE, 1))", "has the best hand and among which pots will be", "np.minimum(self.INITIAL_CAPITAL - prev_round_investment[calls, player_idx], max_bets[calls]) # Reset the bets and", "BIG_BLIND self.logger = logger self.N_PLAYERS = 6 def generate_cards(self): cards", "7463 = 1 lower than the lowest score a hand", "dtype=int) for player in players: player.initialize(self.BATCH_SIZE, self.INITIAL_CAPITAL, self.N_PLAYERS) # Pre-flop", "- prev_round_investment[raises, player_idx]) round_countdown[raises] = self.N_PLAYERS last_raiser[raises] = player_idx ###########", "= 6 def generate_cards(self): cards = np.tile(np.arange(52), (self.BATCH_SIZE, 1)) for", "player_idx]) round_countdown[raises] = self.N_PLAYERS last_raiser[raises] = player_idx ########### # FOLDING", "= np.minimum(investment, self.INITIAL_CAPITAL - prev_round_investment[raises, player_idx]) round_countdown[raises] = self.N_PLAYERS last_raiser[raises]", "i in range(self.BATCH_SIZE): cards[i, :] = FULL_DECK[np.random.permutation(cards[i, :])] community_cards =", "folded, last_raiser, hole_cards[:, player_idx, :], community_cards, total_winnings[:, player_idx]) return 
total_winnings", "= self.BIG_BLIND max_bets[:] = self.BIG_BLIND player_order = player_order[2:] + player_order[:2]", "be split participants = hand_scores == sorted_hands[:, 0][:, None] #", "min_raise, prev_round_investment, folded, last_raiser) def evaluate_hands(self, community_cards, hole_cards, contenders): evaluator", "= np.where(np.logical_and(round_countdown > 0, actions == constants.CALL))[0] if calls.size >", "in enumerate(community_cards): for player_idx,hole in enumerate(hole_cards[game_idx]): if contenders[game_idx, player_idx]: results[game_idx,", "np.zeros((self.BATCH_SIZE, len(players)), dtype=int) for player in players: player.initialize(self.BATCH_SIZE, self.INITIAL_CAPITAL, self.N_PLAYERS)", "the bets and countdown current_bets[calls, player_idx] = investment ########### #", "{} players allowed'.format(self.N_PLAYERS)) community_cards, hole_cards = self.generate_cards() folded = np.zeros((self.BATCH_SIZE,", "self.run_round(players, prev_round_investment, folded, constants.RIVER, hole_cards, community_cards) prev_round_investment += bets #", "== self.INITIAL_CAPITAL] = constants.CALL ########### # CALLING # ########### calls", "prev_round_investment += bets # Showdown pool = np.sum(prev_round_investment, axis=1) total_winnings", "because it bloats the log size (by ~500 kB or", "triples the size) # self.logger.log(constants.EV_PLAYER_ACTION, (round, player_idx, actions, amounts, round_countdown,", "def run_game(self, players): if len(players) != self.N_PLAYERS: raise ValueError('Only {}", "money gains = pool / n_splits_per_game total_winnings += participants *", "investment - max_bets[raises] max_bets[raises] = investment # Reset the bets", "will be split participants = hand_scores == sorted_hands[:, 0][:, None]", "0] = self.SMALL_BLIND current_bets[:, 1] = self.BIG_BLIND max_bets[:] = self.BIG_BLIND", "-= 1 #TODO: if all folded stops game, improves performance", "People who have gone all-in continue to be all-in actions[prev_round_investment[:,", "min_raise = np.zeros(self.BATCH_SIZE, dtype=int) min_raise[:] = self.BIG_BLIND last_raiser = np.zeros(self.BATCH_SIZE,", "########### # RAISING # ########### raises = np.where(np.logical_and(round_countdown > 0,", "= investment # Reset the bets and countdown current_bets[raises, player_idx]", "to fold actions[folded[:, player_idx] == 1] = constants.FOLD # People", "cards = np.tile(np.arange(52), (self.BATCH_SIZE, 1)) for i in range(self.BATCH_SIZE): cards[i,", "prev_round_investment, folded, last_raiser) def evaluate_hands(self, community_cards, hole_cards, contenders): evaluator =", "_ = self.run_round(players, prev_round_investment, folded, constants.FLOP, hole_cards, community_cards[:, :3]) prev_round_investment", "have gone all-in continue to be all-in actions[prev_round_investment[:, player_idx] +", "= player.act(player_idx, round, round_countdown > 0, current_bets, min_raise, prev_round_investment, folded,", "current_bets, (round, current_bets, min_raise, prev_round_investment, folded, last_raiser) def evaluate_hands(self, community_cards,", "in player_order: actions, amounts = player.act(player_idx, round, round_countdown > 0,", "= bool :param round: int ∈ {0..3} :param hole_cards: np.ndarray(batchsize,", "0, actions == constants.RAISE))[0] if raises.size > 0: # print(\"True", "player_idx] = 1 round_countdown[running_games] -= 1 #TODO: if all folded", "round_countdown > 0, current_bets, min_raise, prev_round_investment, folded, last_raiser, hole_cards[:, player_idx,", "in enumerate(hole_cards[game_idx]): if 
contenders[game_idx, player_idx]: results[game_idx, player_idx] = evaluator.evaluate(community.tolist(), hole.tolist())", "folded[:, player_idx])) # People who have already folded continue to", "constants FULL_DECK = np.array(treys.Deck.GetFullDeck()) class GameEngine: def __init__(self, BATCH_SIZE, INITIAL_CAPITAL,", "pool / n_splits_per_game total_winnings += participants * gains[:, None] total_winnings", "== constants.CALL))[0] if calls.size > 0: investment = np.minimum(self.INITIAL_CAPITAL -", "actions == constants.RAISE))[0] if raises.size > 0: # print(\"True raises\",", "Get everyone who has the best hand and among which", "* self.N_PLAYERS], (self.BATCH_SIZE, self.N_PLAYERS, 2)) return community_cards, hole_cards def run_game(self,", "#TODO: if all folded stops game, improves performance but breaks", "return total_winnings def run_round(self, players, prev_round_investment, folded, round, hole_cards, community_cards):", "size (by ~500 kB or so, which triples the size)", "= np.zeros(self.BATCH_SIZE, dtype=int) min_raise[:] = self.BIG_BLIND last_raiser = np.zeros(self.BATCH_SIZE, dtype=int)", "round_countdown[folded.sum(axis=1) == self.N_PLAYERS-1] = 0 if np.max(round_countdown[running_games]) <= 0: return", "breaks tests # test is not broken, is there another", "2)) return community_cards, hole_cards def run_game(self, players): if len(players) !=", ":4]) prev_round_investment += bets # River bets, end_state = self.run_round(players,", "0: investment = np.minimum(self.INITIAL_CAPITAL - prev_round_investment[calls, player_idx], max_bets[calls]) # Reset", "np.nonzero(round_countdown > 0)[0] for player_idx, player in player_order: actions, amounts", "current_bets[raises, player_idx] = np.minimum(investment, self.INITIAL_CAPITAL - prev_round_investment[raises, player_idx]) round_countdown[raises] =", "7463, dtype=int) for game_idx,community in enumerate(community_cards): for player_idx,hole in enumerate(hole_cards[game_idx]):", "1] = self.BIG_BLIND max_bets[:] = self.BIG_BLIND player_order = player_order[2:] +", "in range(self.BATCH_SIZE): cards[i, :] = FULL_DECK[np.random.permutation(cards[i, :])] community_cards = cards[:,", "total_winnings def run_round(self, players, prev_round_investment, folded, round, hole_cards, community_cards): \"\"\"", "in enumerate(players): round, current_bets, min_raise, prev_round_investment, folded, last_raiser = end_state", "have already folded continue to fold actions[folded[:, player_idx] == 1]", "# Turn bets, _ = self.run_round(players, prev_round_investment, folded, constants.TURN, hole_cards,", "= np.take_along_axis(hand_scores, indices=ranks, axis=1) # Get everyone who has the", "round == constants.PRE_FLOP: current_bets[:, 0] = self.SMALL_BLIND current_bets[:, 1] =", "(by ~500 kB or so, which triples the size) #", "np.take_along_axis(hand_scores, indices=ranks, axis=1) # Get everyone who has the best", "who have gone all-in continue to be all-in actions[prev_round_investment[:, player_idx]", "= self.run_round(players, prev_round_investment, folded, constants.TURN, hole_cards, community_cards[:, :4]) prev_round_investment +=", "who has the best hand and among which pots will", "dtype=int) min_raise = np.zeros(self.BATCH_SIZE, dtype=int) min_raise[:] = self.BIG_BLIND last_raiser =", "so, which triples the size) # self.logger.log(constants.EV_PLAYER_ACTION, (round, player_idx, actions,", "0, actions == constants.CALL))[0] if calls.size > 0: investment =", "are descending to 1) results = np.full((self.BATCH_SIZE, self.N_PLAYERS), 7463, dtype=int)", "+ 
player_order[:2] while True: running_games = np.nonzero(round_countdown > 0)[0] for", "community_cards): \"\"\" :param players: [Player] :param prev_round_investment: np.ndarray(batchsize, n_players) =", "folded[np.where(np.logical_and(round_countdown > 0, actions == constants.FOLD))[0], player_idx] = 1 round_countdown[running_games]", "folded, constants.FLOP, hole_cards, community_cards[:, :3]) prev_round_investment += bets # Turn", "player_idx])) # People who have already folded continue to fold", "the log size (by ~500 kB or so, which triples", "for player in players: player.initialize(self.BATCH_SIZE, self.INITIAL_CAPITAL, self.N_PLAYERS) # Pre-flop bets,", "pool = np.sum(prev_round_investment, axis=1) total_winnings = np.zeros((self.BATCH_SIZE, self.N_PLAYERS), dtype=float) hand_scores", "# RAISING # ########### raises = np.where(np.logical_and(round_countdown > 0, actions", "/ n_splits_per_game total_winnings += participants * gains[:, None] total_winnings -=", "self.INITIAL_CAPITAL, self.N_PLAYERS) # Pre-flop bets, _ = self.run_round(players, prev_round_investment, folded,", "River bets, end_state = self.run_round(players, prev_round_investment, folded, constants.RIVER, hole_cards, community_cards)", "self.BIG_BLIND max_bets[:] = self.BIG_BLIND player_order = player_order[2:] + player_order[:2] while", "[str(p) for p in players], folded, hole_cards)) self.logger.save_to_file() for player_idx,", "= np.full((self.BATCH_SIZE, self.N_PLAYERS), 7463, dtype=int) for game_idx,community in enumerate(community_cards): for", "investment # Reset the bets and countdown current_bets[raises, player_idx] =", "########### raises = np.where(np.logical_and(round_countdown > 0, actions == constants.RAISE))[0] if", "folded, hole_cards)) self.logger.save_to_file() for player_idx, player in enumerate(players): round, current_bets,", "is not broken, is there another reason? 
round_countdown[folded.sum(axis=1) == self.N_PLAYERS-1]", "player_idx]) return total_winnings def run_round(self, players, prev_round_investment, folded, round, hole_cards,", "current_bets[:, player_idx] == self.INITIAL_CAPITAL] = constants.CALL ########### # CALLING #", "= 1 lower than the lowest score a hand can", "self.SMALL_BLIND = SMALL_BLIND self.BIG_BLIND = BIG_BLIND self.logger = logger self.N_PLAYERS", "folded: np.ndarray(batchsize, n_players) = bool :param round: int ∈ {0..3}", "player_order: actions, amounts = player.act(player_idx, round, round_countdown > 0, current_bets,", "self.BIG_BLIND last_raiser = np.zeros(self.BATCH_SIZE, dtype=int) player_order = list(enumerate(players)) round_countdown =", "bool :param round: int ∈ {0..3} :param hole_cards: np.ndarray(batchsize, n_players,", "(round, current_bets, min_raise, prev_round_investment, folded, last_raiser) def evaluate_hands(self, community_cards, hole_cards,", "= np.sum(prev_round_investment, axis=1) total_winnings = np.zeros((self.BATCH_SIZE, self.N_PLAYERS), dtype=float) hand_scores =", "player_idx, player in player_order: actions, amounts = player.act(player_idx, round, round_countdown", "########### folded[np.where(np.logical_and(round_countdown > 0, actions == constants.FOLD))[0], player_idx] = 1", "= treys.Card :param community_cards: np.ndarray(batchsize, n_players, {0,3,4,5}) = treys.Card :return:", "the number of times each pot will be split n_splits_per_game", "prev_round_investment, folded, last_raiser, hole_cards[:, player_idx, :], community_cards) # Disabled when", "bloats the log size (by ~500 kB or so, which", "np.minimum(investment, self.INITIAL_CAPITAL - prev_round_investment[raises, player_idx]) round_countdown[raises] = self.N_PLAYERS last_raiser[raises] =", "dtype=int) for game_idx,community in enumerate(community_cards): for player_idx,hole in enumerate(hole_cards[game_idx]): if", "np.zeros(self.BATCH_SIZE, dtype=int) player_order = list(enumerate(players)) round_countdown = np.zeros(self.BATCH_SIZE, dtype=int) round_countdown[:]", "fold actions[folded[:, player_idx] == 1] = constants.FOLD # People who", "cards[:, :5] hole_cards = np.reshape(cards[:, 5:5 + 2 * self.N_PLAYERS],", "np.reshape(cards[:, 5:5 + 2 * self.N_PLAYERS], (self.BATCH_SIZE, self.N_PLAYERS, 2)) return", "times each pot will be split n_splits_per_game = participants.sum(axis=1) #", "cards[i, :] = FULL_DECK[np.random.permutation(cards[i, :])] community_cards = cards[:, :5] hole_cards", "self.N_PLAYERS), dtype=float) hand_scores = self.evaluate_hands(community_cards, hole_cards, np.logical_not(folded)) ranks = np.argsort(hand_scores,", "> 0, actions == constants.RAISE))[0] if raises.size > 0: #", "+= participants * gains[:, None] total_winnings -= prev_round_investment self.logger.log(constants.EV_END_GAME, (hand_scores,", "np.where(np.logical_and(round_countdown > 0, actions == constants.CALL))[0] if calls.size > 0:", "= pool / n_splits_per_game total_winnings += participants * gains[:, None]", "player_order[2:] + player_order[:2] while True: running_games = np.nonzero(round_countdown > 0)[0]", "Flop bets, _ = self.run_round(players, prev_round_investment, folded, constants.FLOP, hole_cards, community_cards[:,", "dtype=float) hand_scores = self.evaluate_hands(community_cards, hole_cards, np.logical_not(folded)) ranks = np.argsort(hand_scores, axis=1)", "0, current_bets, min_raise, prev_round_investment, folded, last_raiser, hole_cards[:, player_idx, :], community_cards)", "n_players) = int :param folded: np.ndarray(batchsize, 
n_players) = bool :param", "to 1) results = np.full((self.BATCH_SIZE, self.N_PLAYERS), 7463, dtype=int) for game_idx,community", "np.full((self.BATCH_SIZE, self.N_PLAYERS), 7463, dtype=int) for game_idx,community in enumerate(community_cards): for player_idx,hole", "= self.run_round(players, prev_round_investment, folded, constants.FLOP, hole_cards, community_cards[:, :3]) prev_round_investment +=", "return community_cards, hole_cards def run_game(self, players): if len(players) != self.N_PLAYERS:", "player_idx ########### # FOLDING # ########### folded[np.where(np.logical_and(round_countdown > 0, actions", "= self.BIG_BLIND player_order = player_order[2:] + player_order[:2] while True: running_games", "= cards[:, :5] hole_cards = np.reshape(cards[:, 5:5 + 2 *", "# People who have already folded continue to fold actions[folded[:,", "== sorted_hands[:, 0][:, None] # Get the number of times", "self.run_round(players, prev_round_investment, folded, constants.PRE_FLOP, hole_cards, community_cards[:, :0]) prev_round_investment += bets", "* gains[:, None] total_winnings -= prev_round_investment self.logger.log(constants.EV_END_GAME, (hand_scores, total_winnings, [str(p)", ":5] hole_cards = np.reshape(cards[:, 5:5 + 2 * self.N_PLAYERS], (self.BATCH_SIZE,", "== constants.PRE_FLOP: current_bets[:, 0] = self.SMALL_BLIND current_bets[:, 1] = self.BIG_BLIND", "0: # print(\"True raises\", raises, amounts[raises]) investment = np.maximum(current_bets[raises, player_idx]", "= self.generate_cards() folded = np.zeros((self.BATCH_SIZE, len(players)), dtype=bool) prev_round_investment = np.zeros((self.BATCH_SIZE,", "2 * self.N_PLAYERS], (self.BATCH_SIZE, self.N_PLAYERS, 2)) return community_cards, hole_cards def", "or so, which triples the size) # self.logger.log(constants.EV_PLAYER_ACTION, (round, player_idx,", "= constants.FOLD # People who have gone all-in continue to", "# Get the number of times each pot will be", "amounts, round_countdown, folded[:, player_idx])) # People who have already folded", "raises\", raises, amounts[raises]) investment = np.maximum(current_bets[raises, player_idx] + amounts[raises], max_bets[raises]", "prev_round_investment[raises, player_idx]) round_countdown[raises] = self.N_PLAYERS last_raiser[raises] = player_idx ########### #", "= FULL_DECK[np.random.permutation(cards[i, :])] community_cards = cards[:, :5] hole_cards = np.reshape(cards[:,", "max_bets[raises] max_bets[raises] = investment # Reset the bets and countdown", "FULL_DECK = np.array(treys.Deck.GetFullDeck()) class GameEngine: def __init__(self, BATCH_SIZE, INITIAL_CAPITAL, SMALL_BLIND,", "gone all-in continue to be all-in actions[prev_round_investment[:, player_idx] + current_bets[:,", "Reset the bets and countdown current_bets[raises, player_idx] = np.minimum(investment, self.INITIAL_CAPITAL", "np.argsort(hand_scores, axis=1) sorted_hands = np.take_along_axis(hand_scores, indices=ranks, axis=1) # Get everyone", "if len(players) != self.N_PLAYERS: raise ValueError('Only {} players allowed'.format(self.N_PLAYERS)) community_cards,", "min_raise, prev_round_investment, folded, last_raiser, hole_cards[:, player_idx, :], community_cards, total_winnings[:, player_idx])", "= self.run_round(players, prev_round_investment, folded, constants.PRE_FLOP, hole_cards, community_cards[:, :0]) prev_round_investment +=", "_ = self.run_round(players, prev_round_investment, folded, constants.PRE_FLOP, hole_cards, community_cards[:, :0]) prev_round_investment", "last_raiser = end_state player.end_trajectory(player_idx, round, 
current_bets, min_raise, prev_round_investment, folded, last_raiser,", "return current_bets, (round, current_bets, min_raise, prev_round_investment, folded, last_raiser) def evaluate_hands(self,", "int ∈ {0..3} :param hole_cards: np.ndarray(batchsize, n_players, 2) = treys.Card", "bets # River bets, end_state = self.run_round(players, prev_round_investment, folded, constants.RIVER,", "constants.RAISE))[0] if raises.size > 0: # print(\"True raises\", raises, amounts[raises])", "self.INITIAL_CAPITAL - prev_round_investment[raises, player_idx]) round_countdown[raises] = self.N_PLAYERS last_raiser[raises] = player_idx", "= self.N_PLAYERS if round == constants.PRE_FLOP: current_bets[:, 0] = self.SMALL_BLIND", "prev_round_investment, folded, constants.RIVER, hole_cards, community_cards) prev_round_investment += bets # Showdown", "n_splits_per_game = participants.sum(axis=1) # Split and distribute the money gains", "round, current_bets, min_raise, prev_round_investment, folded, last_raiser = end_state player.end_trajectory(player_idx, round,", "player_idx] = np.minimum(investment, self.INITIAL_CAPITAL - prev_round_investment[raises, player_idx]) round_countdown[raises] = self.N_PLAYERS", "players): if len(players) != self.N_PLAYERS: raise ValueError('Only {} players allowed'.format(self.N_PLAYERS))", "gains[:, None] total_winnings -= prev_round_investment self.logger.log(constants.EV_END_GAME, (hand_scores, total_winnings, [str(p) for", "evaluator = treys.Evaluator() # 7463 = 1 lower than the", "> 0: # print(\"True raises\", raises, amounts[raises]) investment = np.maximum(current_bets[raises,", "1 round_countdown[running_games] -= 1 #TODO: if all folded stops game,", "constants.PRE_FLOP, hole_cards, community_cards[:, :0]) prev_round_investment += bets # Flop bets,", "+ current_bets[:, player_idx] == self.INITIAL_CAPITAL] = constants.CALL ########### # CALLING", "current_bets, min_raise, prev_round_investment, folded, last_raiser, hole_cards[:, player_idx, :], community_cards) #", "list(enumerate(players)) round_countdown = np.zeros(self.BATCH_SIZE, dtype=int) round_countdown[:] = self.N_PLAYERS if round", "> 0, current_bets, min_raise, prev_round_investment, folded, last_raiser, hole_cards[:, player_idx, :],", "min_raise, prev_round_investment, folded, last_raiser = end_state player.end_trajectory(player_idx, round, current_bets, min_raise,", "= player_order[2:] + player_order[:2] while True: running_games = np.nonzero(round_countdown >", "bets # Showdown pool = np.sum(prev_round_investment, axis=1) total_winnings = np.zeros((self.BATCH_SIZE,", "5:5 + 2 * self.N_PLAYERS], (self.BATCH_SIZE, self.N_PLAYERS, 2)) return community_cards,", "players, prev_round_investment, folded, round, hole_cards, community_cards): \"\"\" :param players: [Player]", "countdown current_bets[calls, player_idx] = investment ########### # RAISING # ###########", "== 1] = constants.FOLD # People who have gone all-in", "range(self.BATCH_SIZE): cards[i, :] = FULL_DECK[np.random.permutation(cards[i, :])] community_cards = cards[:, :5]", "hole_cards, community_cards): \"\"\" :param players: [Player] :param prev_round_investment: np.ndarray(batchsize, n_players)", "= end_state player.end_trajectory(player_idx, round, current_bets, min_raise, prev_round_investment, folded, last_raiser, hole_cards[:,", "np.zeros(self.BATCH_SIZE, dtype=int) round_countdown[:] = self.N_PLAYERS if round == constants.PRE_FLOP: current_bets[:,", "constants.FLOP, hole_cards, community_cards[:, :3]) prev_round_investment += bets # Turn bets,", 
"current_bets, min_raise, prev_round_investment, folded, last_raiser) def evaluate_hands(self, community_cards, hole_cards, contenders):", "player_order = list(enumerate(players)) round_countdown = np.zeros(self.BATCH_SIZE, dtype=int) round_countdown[:] = self.N_PLAYERS", "import constants FULL_DECK = np.array(treys.Deck.GetFullDeck()) class GameEngine: def __init__(self, BATCH_SIZE,", "allowed'.format(self.N_PLAYERS)) community_cards, hole_cards = self.generate_cards() folded = np.zeros((self.BATCH_SIZE, len(players)), dtype=bool)", "while True: running_games = np.nonzero(round_countdown > 0)[0] for player_idx, player", "current_bets = np.zeros((self.BATCH_SIZE, self.N_PLAYERS), dtype=int) max_bets = np.zeros(self.BATCH_SIZE, dtype=int) min_raise", "np.ndarray(batchsize, n_players, {0,3,4,5}) = treys.Card :return: current_bets: np.ndarray(batchsize, n_players)=int {0-200}", "dtype=int) round_countdown[:] = self.N_PLAYERS if round == constants.PRE_FLOP: current_bets[:, 0]", "round_countdown = np.zeros(self.BATCH_SIZE, dtype=int) round_countdown[:] = self.N_PLAYERS if round ==", "(hand_scores, total_winnings, [str(p) for p in players], folded, hole_cards)) self.logger.save_to_file()", "self.N_PLAYERS), 7463, dtype=int) for game_idx,community in enumerate(community_cards): for player_idx,hole in", "<gh_stars>0 import numpy as np import pickle import treys import", "np.sum(prev_round_investment, axis=1) total_winnings = np.zeros((self.BATCH_SIZE, self.N_PLAYERS), dtype=float) hand_scores = self.evaluate_hands(community_cards,", "countdown current_bets[raises, player_idx] = np.minimum(investment, self.INITIAL_CAPITAL - prev_round_investment[raises, player_idx]) round_countdown[raises]", "hole_cards, contenders): evaluator = treys.Evaluator() # 7463 = 1 lower", "GameEngine: def __init__(self, BATCH_SIZE, INITIAL_CAPITAL, SMALL_BLIND, BIG_BLIND, logger): self.BATCH_SIZE =", "hole_cards, community_cards) prev_round_investment += bets # Showdown pool = np.sum(prev_round_investment,", "hole_cards[:, player_idx, :], community_cards, total_winnings[:, player_idx]) return total_winnings def run_round(self,", "1 #TODO: if all folded stops game, improves performance but", "folded, last_raiser) def evaluate_hands(self, community_cards, hole_cards, contenders): evaluator = treys.Evaluator()", "the size) # self.logger.log(constants.EV_PLAYER_ACTION, (round, player_idx, actions, amounts, round_countdown, folded[:,", "+= bets # Turn bets, _ = self.run_round(players, prev_round_investment, folded,", "Get the number of times each pot will be split", "== constants.FOLD))[0], player_idx] = 1 round_countdown[running_games] -= 1 #TODO: if", "# Reset the bets and countdown current_bets[calls, player_idx] = investment", "np.max(round_countdown[running_games]) <= 0: return current_bets, (round, current_bets, min_raise, prev_round_investment, folded,", "enumerate(hole_cards[game_idx]): if contenders[game_idx, player_idx]: results[game_idx, player_idx] = evaluator.evaluate(community.tolist(), hole.tolist()) return", "hand and among which pots will be split participants =", "and among which pots will be split participants = hand_scores", "bets, _ = self.run_round(players, prev_round_investment, folded, constants.FLOP, hole_cards, community_cards[:, :3])", "each pot will be split n_splits_per_game = participants.sum(axis=1) # Split", "= np.minimum(self.INITIAL_CAPITAL - prev_round_investment[calls, player_idx], max_bets[calls]) # Reset the bets", "actions[folded[:, player_idx] == 1] = constants.FOLD # People who have", 
"test is not broken, is there another reason? round_countdown[folded.sum(axis=1) ==", "folded, constants.PRE_FLOP, hole_cards, community_cards[:, :0]) prev_round_investment += bets # Flop", "import numpy as np import pickle import treys import constants", "the lowest score a hand can have (scores are descending", "folded, round, hole_cards, community_cards): \"\"\" :param players: [Player] :param prev_round_investment:", "player_order = player_order[2:] + player_order[:2] while True: running_games = np.nonzero(round_countdown", "prev_round_investment, folded, constants.PRE_FLOP, hole_cards, community_cards[:, :0]) prev_round_investment += bets #", "self.logger.log(constants.EV_END_GAME, (hand_scores, total_winnings, [str(p) for p in players], folded, hole_cards))", "bets and countdown current_bets[calls, player_idx] = investment ########### # RAISING", "np.zeros((self.BATCH_SIZE, self.N_PLAYERS), dtype=float) hand_scores = self.evaluate_hands(community_cards, hole_cards, np.logical_not(folded)) ranks =", "round_countdown[:] = self.N_PLAYERS if round == constants.PRE_FLOP: current_bets[:, 0] =", "(scores are descending to 1) results = np.full((self.BATCH_SIZE, self.N_PLAYERS), 7463,", "= np.nonzero(round_countdown > 0)[0] for player_idx, player in player_order: actions,", "hole_cards, community_cards[:, :4]) prev_round_investment += bets # River bets, end_state", "game_idx,community in enumerate(community_cards): for player_idx,hole in enumerate(hole_cards[game_idx]): if contenders[game_idx, player_idx]:", "# Showdown pool = np.sum(prev_round_investment, axis=1) total_winnings = np.zeros((self.BATCH_SIZE, self.N_PLAYERS),", "6 def generate_cards(self): cards = np.tile(np.arange(52), (self.BATCH_SIZE, 1)) for i", "axis=1) sorted_hands = np.take_along_axis(hand_scores, indices=ranks, axis=1) # Get everyone who", "= np.zeros((self.BATCH_SIZE, self.N_PLAYERS), dtype=float) hand_scores = self.evaluate_hands(community_cards, hole_cards, np.logical_not(folded)) ranks", "True: running_games = np.nonzero(round_countdown > 0)[0] for player_idx, player in", "treys import constants FULL_DECK = np.array(treys.Deck.GetFullDeck()) class GameEngine: def __init__(self,", "treys.Card :param community_cards: np.ndarray(batchsize, n_players, {0,3,4,5}) = treys.Card :return: current_bets:", "= np.zeros((self.BATCH_SIZE, len(players)), dtype=int) for player in players: player.initialize(self.BATCH_SIZE, self.INITIAL_CAPITAL,", "hole_cards = self.generate_cards() folded = np.zeros((self.BATCH_SIZE, len(players)), dtype=bool) prev_round_investment =", "self.N_PLAYERS, 2)) return community_cards, hole_cards def run_game(self, players): if len(players)", "= INITIAL_CAPITAL self.SMALL_BLIND = SMALL_BLIND self.BIG_BLIND = BIG_BLIND self.logger =", "folded stops game, improves performance but breaks tests # test", "import pickle import treys import constants FULL_DECK = np.array(treys.Deck.GetFullDeck()) class", "\"\"\" :param players: [Player] :param prev_round_investment: np.ndarray(batchsize, n_players) = int", "prev_round_investment[calls, player_idx], max_bets[calls]) # Reset the bets and countdown current_bets[calls,", "RAISING # ########### raises = np.where(np.logical_and(round_countdown > 0, actions ==", "which pots will be split participants = hand_scores == sorted_hands[:,", "max_bets[raises] + min_raise[raises]) min_raise[raises] = investment - max_bets[raises] max_bets[raises] =", "0][:, None] # Get the number of times each pot", "not necessary because it bloats the log size (by ~500", "logger): 
self.BATCH_SIZE = BATCH_SIZE self.INITIAL_CAPITAL = INITIAL_CAPITAL self.SMALL_BLIND = SMALL_BLIND", "axis=1) total_winnings = np.zeros((self.BATCH_SIZE, self.N_PLAYERS), dtype=float) hand_scores = self.evaluate_hands(community_cards, hole_cards,", "+ min_raise[raises]) min_raise[raises] = investment - max_bets[raises] max_bets[raises] = investment", "1)) for i in range(self.BATCH_SIZE): cards[i, :] = FULL_DECK[np.random.permutation(cards[i, :])]", "and countdown current_bets[calls, player_idx] = investment ########### # RAISING #", "self.INITIAL_CAPITAL] = constants.CALL ########### # CALLING # ########### calls =", "# ########### folded[np.where(np.logical_and(round_countdown > 0, actions == constants.FOLD))[0], player_idx] =", "reason? round_countdown[folded.sum(axis=1) == self.N_PLAYERS-1] = 0 if np.max(round_countdown[running_games]) <= 0:", "kB or so, which triples the size) # self.logger.log(constants.EV_PLAYER_ACTION, (round,", "a hand can have (scores are descending to 1) results", "0: return current_bets, (round, current_bets, min_raise, prev_round_investment, folded, last_raiser) def", "the bets and countdown current_bets[raises, player_idx] = np.minimum(investment, self.INITIAL_CAPITAL -", "score a hand can have (scores are descending to 1)", "prev_round_investment: np.ndarray(batchsize, n_players) = int :param folded: np.ndarray(batchsize, n_players) =", "Turn bets, _ = self.run_round(players, prev_round_investment, folded, constants.TURN, hole_cards, community_cards[:,", "CALLING # ########### calls = np.where(np.logical_and(round_countdown > 0, actions ==", "community_cards) prev_round_investment += bets # Showdown pool = np.sum(prev_round_investment, axis=1)", "# test is not broken, is there another reason? round_countdown[folded.sum(axis=1)", "treys.Evaluator() # 7463 = 1 lower than the lowest score", "player_idx, :], community_cards) # Disabled when not necessary because it", "# River bets, end_state = self.run_round(players, prev_round_investment, folded, constants.RIVER, hole_cards,", "self.N_PLAYERS last_raiser[raises] = player_idx ########### # FOLDING # ########### folded[np.where(np.logical_and(round_countdown", "n_players) = bool :param round: int ∈ {0..3} :param hole_cards:", "all folded stops game, improves performance but breaks tests #", "hole_cards, np.logical_not(folded)) ranks = np.argsort(hand_scores, axis=1) sorted_hands = np.take_along_axis(hand_scores, indices=ranks,", "the money gains = pool / n_splits_per_game total_winnings += participants", "self.N_PLAYERS: raise ValueError('Only {} players allowed'.format(self.N_PLAYERS)) community_cards, hole_cards = self.generate_cards()", "round: int ∈ {0..3} :param hole_cards: np.ndarray(batchsize, n_players, 2) =", "last_raiser[raises] = player_idx ########### # FOLDING # ########### folded[np.where(np.logical_and(round_countdown >", "hole_cards = np.reshape(cards[:, 5:5 + 2 * self.N_PLAYERS], (self.BATCH_SIZE, self.N_PLAYERS,", "player.act(player_idx, round, round_countdown > 0, current_bets, min_raise, prev_round_investment, folded, last_raiser,", "to be all-in actions[prev_round_investment[:, player_idx] + current_bets[:, player_idx] == self.INITIAL_CAPITAL]", "import treys import constants FULL_DECK = np.array(treys.Deck.GetFullDeck()) class GameEngine: def", "for player_idx, player in enumerate(players): round, current_bets, min_raise, prev_round_investment, folded,", "max_bets = np.zeros(self.BATCH_SIZE, dtype=int) min_raise = np.zeros(self.BATCH_SIZE, dtype=int) min_raise[:] =", 
"FULL_DECK[np.random.permutation(cards[i, :])] community_cards = cards[:, :5] hole_cards = np.reshape(cards[:, 5:5", "# Flop bets, _ = self.run_round(players, prev_round_investment, folded, constants.FLOP, hole_cards,", "and distribute the money gains = pool / n_splits_per_game total_winnings", "sorted_hands[:, 0][:, None] # Get the number of times each", "and countdown current_bets[raises, player_idx] = np.minimum(investment, self.INITIAL_CAPITAL - prev_round_investment[raises, player_idx])", "which triples the size) # self.logger.log(constants.EV_PLAYER_ACTION, (round, player_idx, actions, amounts,", "hole_cards)) self.logger.save_to_file() for player_idx, player in enumerate(players): round, current_bets, min_raise,", "= int :param folded: np.ndarray(batchsize, n_players) = bool :param round:", "People who have already folded continue to fold actions[folded[:, player_idx]", "== constants.RAISE))[0] if raises.size > 0: # print(\"True raises\", raises,", "# 7463 = 1 lower than the lowest score a", "np.logical_not(folded)) ranks = np.argsort(hand_scores, axis=1) sorted_hands = np.take_along_axis(hand_scores, indices=ranks, axis=1)", "folded, last_raiser = end_state player.end_trajectory(player_idx, round, current_bets, min_raise, prev_round_investment, folded,", "np.ndarray(batchsize, n_players) = int :param folded: np.ndarray(batchsize, n_players) = bool", "dtype=int) player_order = list(enumerate(players)) round_countdown = np.zeros(self.BATCH_SIZE, dtype=int) round_countdown[:] =", "player_idx] + amounts[raises], max_bets[raises] + min_raise[raises]) min_raise[raises] = investment -", "- prev_round_investment[calls, player_idx], max_bets[calls]) # Reset the bets and countdown", "self.N_PLAYERS], (self.BATCH_SIZE, self.N_PLAYERS, 2)) return community_cards, hole_cards def run_game(self, players):", "all-in actions[prev_round_investment[:, player_idx] + current_bets[:, player_idx] == self.INITIAL_CAPITAL] = constants.CALL", "self.BIG_BLIND player_order = player_order[2:] + player_order[:2] while True: running_games =", "is there another reason? 
round_countdown[folded.sum(axis=1) == self.N_PLAYERS-1] = 0 if", "def run_round(self, players, prev_round_investment, folded, round, hole_cards, community_cards): \"\"\" :param", "treys.Card :return: current_bets: np.ndarray(batchsize, n_players)=int {0-200} \"\"\" current_bets = np.zeros((self.BATCH_SIZE,", "generate_cards(self): cards = np.tile(np.arange(52), (self.BATCH_SIZE, 1)) for i in range(self.BATCH_SIZE):", "pickle import treys import constants FULL_DECK = np.array(treys.Deck.GetFullDeck()) class GameEngine:", "player_idx] == 1] = constants.FOLD # People who have gone", "player.end_trajectory(player_idx, round, current_bets, min_raise, prev_round_investment, folded, last_raiser, hole_cards[:, player_idx, :],", "actions == constants.FOLD))[0], player_idx] = 1 round_countdown[running_games] -= 1 #TODO:", "raise ValueError('Only {} players allowed'.format(self.N_PLAYERS)) community_cards, hole_cards = self.generate_cards() folded", "prev_round_investment, folded, last_raiser = end_state player.end_trajectory(player_idx, round, current_bets, min_raise, prev_round_investment,", "lower than the lowest score a hand can have (scores", "= np.zeros((self.BATCH_SIZE, len(players)), dtype=bool) prev_round_investment = np.zeros((self.BATCH_SIZE, len(players)), dtype=int) for", "than the lowest score a hand can have (scores are", "run_round(self, players, prev_round_investment, folded, round, hole_cards, community_cards): \"\"\" :param players:", ":], community_cards, total_winnings[:, player_idx]) return total_winnings def run_round(self, players, prev_round_investment,", "best hand and among which pots will be split participants", "np.ndarray(batchsize, n_players)=int {0-200} \"\"\" current_bets = np.zeros((self.BATCH_SIZE, self.N_PLAYERS), dtype=int) max_bets", "+ 2 * self.N_PLAYERS], (self.BATCH_SIZE, self.N_PLAYERS, 2)) return community_cards, hole_cards", "np.zeros((self.BATCH_SIZE, self.N_PLAYERS), dtype=int) max_bets = np.zeros(self.BATCH_SIZE, dtype=int) min_raise = np.zeros(self.BATCH_SIZE,", "len(players)), dtype=bool) prev_round_investment = np.zeros((self.BATCH_SIZE, len(players)), dtype=int) for player in", "self.INITIAL_CAPITAL = INITIAL_CAPITAL self.SMALL_BLIND = SMALL_BLIND self.BIG_BLIND = BIG_BLIND self.logger", "SMALL_BLIND self.BIG_BLIND = BIG_BLIND self.logger = logger self.N_PLAYERS = 6", "community_cards, hole_cards def run_game(self, players): if len(players) != self.N_PLAYERS: raise", "Split and distribute the money gains = pool / n_splits_per_game", "p in players], folded, hole_cards)) self.logger.save_to_file() for player_idx, player in", "# Split and distribute the money gains = pool /", "\"\"\" current_bets = np.zeros((self.BATCH_SIZE, self.N_PLAYERS), dtype=int) max_bets = np.zeros(self.BATCH_SIZE, dtype=int)", "= np.argsort(hand_scores, axis=1) sorted_hands = np.take_along_axis(hand_scores, indices=ranks, axis=1) # Get", "constants.CALL ########### # CALLING # ########### calls = np.where(np.logical_and(round_countdown >", "∈ {0..3} :param hole_cards: np.ndarray(batchsize, n_players, 2) = treys.Card :param", "# Pre-flop bets, _ = self.run_round(players, prev_round_investment, folded, constants.PRE_FLOP, hole_cards,", "Reset the bets and countdown current_bets[calls, player_idx] = investment ###########", "int :param folded: np.ndarray(batchsize, n_players) = bool :param round: int", "player_idx, player in enumerate(players): round, current_bets, min_raise, prev_round_investment, folded, last_raiser", "when not necessary because it bloats the log size (by", 
"last_raiser, hole_cards[:, player_idx, :], community_cards) # Disabled when not necessary", "if np.max(round_countdown[running_games]) <= 0: return current_bets, (round, current_bets, min_raise, prev_round_investment,", "numpy as np import pickle import treys import constants FULL_DECK", "# Reset the bets and countdown current_bets[raises, player_idx] = np.minimum(investment,", "community_cards) # Disabled when not necessary because it bloats the", "self.SMALL_BLIND current_bets[:, 1] = self.BIG_BLIND max_bets[:] = self.BIG_BLIND player_order =", "current_bets[calls, player_idx] = investment ########### # RAISING # ########### raises", "= logger self.N_PLAYERS = 6 def generate_cards(self): cards = np.tile(np.arange(52),", "2) = treys.Card :param community_cards: np.ndarray(batchsize, n_players, {0,3,4,5}) = treys.Card", "enumerate(community_cards): for player_idx,hole in enumerate(hole_cards[game_idx]): if contenders[game_idx, player_idx]: results[game_idx, player_idx]", ":])] community_cards = cards[:, :5] hole_cards = np.reshape(cards[:, 5:5 +", "= BIG_BLIND self.logger = logger self.N_PLAYERS = 6 def generate_cards(self):", "bets # Flop bets, _ = self.run_round(players, prev_round_investment, folded, constants.FLOP,", "folded continue to fold actions[folded[:, player_idx] == 1] = constants.FOLD", "raises.size > 0: # print(\"True raises\", raises, amounts[raises]) investment =", "########### calls = np.where(np.logical_and(round_countdown > 0, actions == constants.CALL))[0] if", "INITIAL_CAPITAL self.SMALL_BLIND = SMALL_BLIND self.BIG_BLIND = BIG_BLIND self.logger = logger", "= np.tile(np.arange(52), (self.BATCH_SIZE, 1)) for i in range(self.BATCH_SIZE): cards[i, :]", "player_order[:2] while True: running_games = np.nonzero(round_countdown > 0)[0] for player_idx,", "enumerate(players): round, current_bets, min_raise, prev_round_investment, folded, last_raiser = end_state player.end_trajectory(player_idx,", "it bloats the log size (by ~500 kB or so,", "player in players: player.initialize(self.BATCH_SIZE, self.INITIAL_CAPITAL, self.N_PLAYERS) # Pre-flop bets, _", "will be split n_splits_per_game = participants.sum(axis=1) # Split and distribute", "> 0, actions == constants.FOLD))[0], player_idx] = 1 round_countdown[running_games] -=", "= self.evaluate_hands(community_cards, hole_cards, np.logical_not(folded)) ranks = np.argsort(hand_scores, axis=1) sorted_hands =", "np.tile(np.arange(52), (self.BATCH_SIZE, 1)) for i in range(self.BATCH_SIZE): cards[i, :] =", "n_players, {0,3,4,5}) = treys.Card :return: current_bets: np.ndarray(batchsize, n_players)=int {0-200} \"\"\"", "total_winnings[:, player_idx]) return total_winnings def run_round(self, players, prev_round_investment, folded, round,", "calls.size > 0: investment = np.minimum(self.INITIAL_CAPITAL - prev_round_investment[calls, player_idx], max_bets[calls])", "round, hole_cards, community_cards): \"\"\" :param players: [Player] :param prev_round_investment: np.ndarray(batchsize,", "np.where(np.logical_and(round_countdown > 0, actions == constants.RAISE))[0] if raises.size > 0:", "current_bets[:, 1] = self.BIG_BLIND max_bets[:] = self.BIG_BLIND player_order = player_order[2:]", "> 0)[0] for player_idx, player in player_order: actions, amounts =", "current_bets, min_raise, prev_round_investment, folded, last_raiser, hole_cards[:, player_idx, :], community_cards, total_winnings[:,", "constants.CALL))[0] if calls.size > 0: investment = np.minimum(self.INITIAL_CAPITAL - prev_round_investment[calls,", "self.N_PLAYERS), dtype=int) max_bets = 
np.zeros(self.BATCH_SIZE, dtype=int) min_raise = np.zeros(self.BATCH_SIZE, dtype=int)", "community_cards[:, :4]) prev_round_investment += bets # River bets, end_state =", "= self.SMALL_BLIND current_bets[:, 1] = self.BIG_BLIND max_bets[:] = self.BIG_BLIND player_order", "amounts[raises], max_bets[raises] + min_raise[raises]) min_raise[raises] = investment - max_bets[raises] max_bets[raises]", "bets, _ = self.run_round(players, prev_round_investment, folded, constants.TURN, hole_cards, community_cards[:, :4])", "calls = np.where(np.logical_and(round_countdown > 0, actions == constants.CALL))[0] if calls.size", "> 0, actions == constants.CALL))[0] if calls.size > 0: investment", "def __init__(self, BATCH_SIZE, INITIAL_CAPITAL, SMALL_BLIND, BIG_BLIND, logger): self.BATCH_SIZE = BATCH_SIZE", "self.N_PLAYERS if round == constants.PRE_FLOP: current_bets[:, 0] = self.SMALL_BLIND current_bets[:,", "prev_round_investment, folded, constants.TURN, hole_cards, community_cards[:, :4]) prev_round_investment += bets #", "for player_idx,hole in enumerate(hole_cards[game_idx]): if contenders[game_idx, player_idx]: results[game_idx, player_idx] =", "broken, is there another reason? round_countdown[folded.sum(axis=1) == self.N_PLAYERS-1] = 0", "class GameEngine: def __init__(self, BATCH_SIZE, INITIAL_CAPITAL, SMALL_BLIND, BIG_BLIND, logger): self.BATCH_SIZE", "community_cards[:, :3]) prev_round_investment += bets # Turn bets, _ =", "= np.zeros(self.BATCH_SIZE, dtype=int) min_raise = np.zeros(self.BATCH_SIZE, dtype=int) min_raise[:] = self.BIG_BLIND", "[Player] :param prev_round_investment: np.ndarray(batchsize, n_players) = int :param folded: np.ndarray(batchsize,", "# CALLING # ########### calls = np.where(np.logical_and(round_countdown > 0, actions", "{0,3,4,5}) = treys.Card :return: current_bets: np.ndarray(batchsize, n_players)=int {0-200} \"\"\" current_bets", "there another reason? 
round_countdown[folded.sum(axis=1) == self.N_PLAYERS-1] = 0 if np.max(round_countdown[running_games])", "hole_cards, community_cards[:, :3]) prev_round_investment += bets # Turn bets, _", "= np.reshape(cards[:, 5:5 + 2 * self.N_PLAYERS], (self.BATCH_SIZE, self.N_PLAYERS, 2))", "= constants.CALL ########### # CALLING # ########### calls = np.where(np.logical_and(round_countdown", "(self.BATCH_SIZE, self.N_PLAYERS, 2)) return community_cards, hole_cards def run_game(self, players): if", "bets # Turn bets, _ = self.run_round(players, prev_round_investment, folded, constants.TURN,", "= self.N_PLAYERS last_raiser[raises] = player_idx ########### # FOLDING # ###########", "np.zeros(self.BATCH_SIZE, dtype=int) min_raise = np.zeros(self.BATCH_SIZE, dtype=int) min_raise[:] = self.BIG_BLIND last_raiser", "= investment - max_bets[raises] max_bets[raises] = investment # Reset the", "1) results = np.full((self.BATCH_SIZE, self.N_PLAYERS), 7463, dtype=int) for game_idx,community in", "0 if np.max(round_countdown[running_games]) <= 0: return current_bets, (round, current_bets, min_raise,", "as np import pickle import treys import constants FULL_DECK =", "actions, amounts, round_countdown, folded[:, player_idx])) # People who have already", "= list(enumerate(players)) round_countdown = np.zeros(self.BATCH_SIZE, dtype=int) round_countdown[:] = self.N_PLAYERS if", "amounts[raises]) investment = np.maximum(current_bets[raises, player_idx] + amounts[raises], max_bets[raises] + min_raise[raises])", "size) # self.logger.log(constants.EV_PLAYER_ACTION, (round, player_idx, actions, amounts, round_countdown, folded[:, player_idx]))", "number of times each pot will be split n_splits_per_game =", "min_raise[raises] = investment - max_bets[raises] max_bets[raises] = investment # Reset", "FOLDING # ########### folded[np.where(np.logical_and(round_countdown > 0, actions == constants.FOLD))[0], player_idx]", "np.array(treys.Deck.GetFullDeck()) class GameEngine: def __init__(self, BATCH_SIZE, INITIAL_CAPITAL, SMALL_BLIND, BIG_BLIND, logger):", "player in enumerate(players): round, current_bets, min_raise, prev_round_investment, folded, last_raiser =", "performance but breaks tests # test is not broken, is", "= 1 round_countdown[running_games] -= 1 #TODO: if all folded stops", "= np.where(np.logical_and(round_countdown > 0, actions == constants.RAISE))[0] if raises.size >", "total_winnings = np.zeros((self.BATCH_SIZE, self.N_PLAYERS), dtype=float) hand_scores = self.evaluate_hands(community_cards, hole_cards, np.logical_not(folded))", "players allowed'.format(self.N_PLAYERS)) community_cards, hole_cards = self.generate_cards() folded = np.zeros((self.BATCH_SIZE, len(players)),", "n_players)=int {0-200} \"\"\" current_bets = np.zeros((self.BATCH_SIZE, self.N_PLAYERS), dtype=int) max_bets =", "descending to 1) results = np.full((self.BATCH_SIZE, self.N_PLAYERS), 7463, dtype=int) for", "# ########### calls = np.where(np.logical_and(round_countdown > 0, actions == constants.CALL))[0]", "actions[prev_round_investment[:, player_idx] + current_bets[:, player_idx] == self.INITIAL_CAPITAL] = constants.CALL ###########", "= np.array(treys.Deck.GetFullDeck()) class GameEngine: def __init__(self, BATCH_SIZE, INITIAL_CAPITAL, SMALL_BLIND, BIG_BLIND,", "the best hand and among which pots will be split", "prev_round_investment, folded, last_raiser, hole_cards[:, player_idx, :], community_cards, total_winnings[:, player_idx]) return", "folded, constants.TURN, hole_cards, community_cards[:, :4]) prev_round_investment += bets # River", 
"last_raiser = np.zeros(self.BATCH_SIZE, dtype=int) player_order = list(enumerate(players)) round_countdown = np.zeros(self.BATCH_SIZE,", "log size (by ~500 kB or so, which triples the", "participants = hand_scores == sorted_hands[:, 0][:, None] # Get the", "########### # CALLING # ########### calls = np.where(np.logical_and(round_countdown > 0,", "bets and countdown current_bets[raises, player_idx] = np.minimum(investment, self.INITIAL_CAPITAL - prev_round_investment[raises,", "np.ndarray(batchsize, n_players, 2) = treys.Card :param community_cards: np.ndarray(batchsize, n_players, {0,3,4,5})", "player_idx], max_bets[calls]) # Reset the bets and countdown current_bets[calls, player_idx]", "np.zeros((self.BATCH_SIZE, len(players)), dtype=bool) prev_round_investment = np.zeros((self.BATCH_SIZE, len(players)), dtype=int) for player", "min_raise, prev_round_investment, folded, last_raiser, hole_cards[:, player_idx, :], community_cards) # Disabled", "split n_splits_per_game = participants.sum(axis=1) # Split and distribute the money", ":0]) prev_round_investment += bets # Flop bets, _ = self.run_round(players,", "in players: player.initialize(self.BATCH_SIZE, self.INITIAL_CAPITAL, self.N_PLAYERS) # Pre-flop bets, _ =", "folded, last_raiser, hole_cards[:, player_idx, :], community_cards) # Disabled when not", "max_bets[raises] = investment # Reset the bets and countdown current_bets[raises,", "= SMALL_BLIND self.BIG_BLIND = BIG_BLIND self.logger = logger self.N_PLAYERS =", "-= prev_round_investment self.logger.log(constants.EV_END_GAME, (hand_scores, total_winnings, [str(p) for p in players],", "players: player.initialize(self.BATCH_SIZE, self.INITIAL_CAPITAL, self.N_PLAYERS) # Pre-flop bets, _ = self.run_round(players,", "print(\"True raises\", raises, amounts[raises]) investment = np.maximum(current_bets[raises, player_idx] + amounts[raises],", "bets, end_state = self.run_round(players, prev_round_investment, folded, constants.RIVER, hole_cards, community_cards) prev_round_investment", ":], community_cards) # Disabled when not necessary because it bloats", "prev_round_investment = np.zeros((self.BATCH_SIZE, len(players)), dtype=int) for player in players: player.initialize(self.BATCH_SIZE,", "hand can have (scores are descending to 1) results =", "actions, amounts = player.act(player_idx, round, round_countdown > 0, current_bets, min_raise,", "np.ndarray(batchsize, n_players) = bool :param round: int ∈ {0..3} :param", "indices=ranks, axis=1) # Get everyone who has the best hand", "# People who have gone all-in continue to be all-in", "folded = np.zeros((self.BATCH_SIZE, len(players)), dtype=bool) prev_round_investment = np.zeros((self.BATCH_SIZE, len(players)), dtype=int)", "player_idx] + current_bets[:, player_idx] == self.INITIAL_CAPITAL] = constants.CALL ########### #", "last_raiser, hole_cards[:, player_idx, :], community_cards, total_winnings[:, player_idx]) return total_winnings def", "round_countdown[running_games] -= 1 #TODO: if all folded stops game, improves", "constants.RIVER, hole_cards, community_cards) prev_round_investment += bets # Showdown pool =", "player_idx] = investment ########### # RAISING # ########### raises =", "be all-in actions[prev_round_investment[:, player_idx] + current_bets[:, player_idx] == self.INITIAL_CAPITAL] =", "among which pots will be split participants = hand_scores ==", "stops game, improves performance but breaks tests # test is", "sorted_hands = np.take_along_axis(hand_scores, indices=ranks, axis=1) # Get everyone who has", "# ########### raises = 
np.where(np.logical_and(round_countdown > 0, actions == constants.RAISE))[0]", "def generate_cards(self): cards = np.tile(np.arange(52), (self.BATCH_SIZE, 1)) for i in", "dtype=bool) prev_round_investment = np.zeros((self.BATCH_SIZE, len(players)), dtype=int) for player in players:", "= 0 if np.max(round_countdown[running_games]) <= 0: return current_bets, (round, current_bets,", "raises = np.where(np.logical_and(round_countdown > 0, actions == constants.RAISE))[0] if raises.size", "split participants = hand_scores == sorted_hands[:, 0][:, None] # Get", "community_cards, total_winnings[:, player_idx]) return total_winnings def run_round(self, players, prev_round_investment, folded,", "BIG_BLIND, logger): self.BATCH_SIZE = BATCH_SIZE self.INITIAL_CAPITAL = INITIAL_CAPITAL self.SMALL_BLIND =", "investment = np.maximum(current_bets[raises, player_idx] + amounts[raises], max_bets[raises] + min_raise[raises]) min_raise[raises]", "gains = pool / n_splits_per_game total_winnings += participants * gains[:,", "Showdown pool = np.sum(prev_round_investment, axis=1) total_winnings = np.zeros((self.BATCH_SIZE, self.N_PLAYERS), dtype=float)", "player_idx,hole in enumerate(hole_cards[game_idx]): if contenders[game_idx, player_idx]: results[game_idx, player_idx] = evaluator.evaluate(community.tolist(),", "hand_scores == sorted_hands[:, 0][:, None] # Get the number of", "prev_round_investment, folded, round, hole_cards, community_cards): \"\"\" :param players: [Player] :param", "(round, player_idx, actions, amounts, round_countdown, folded[:, player_idx])) # People who", "{0-200} \"\"\" current_bets = np.zeros((self.BATCH_SIZE, self.N_PLAYERS), dtype=int) max_bets = np.zeros(self.BATCH_SIZE,", "in players], folded, hole_cards)) self.logger.save_to_file() for player_idx, player in enumerate(players):", ":param folded: np.ndarray(batchsize, n_players) = bool :param round: int ∈", "tests # test is not broken, is there another reason?", "ValueError('Only {} players allowed'.format(self.N_PLAYERS)) community_cards, hole_cards = self.generate_cards() folded =" ]
[ "if obj.__class__ == Placeholder: placeholders[obj.pk] = obj if obj.__class__ ==", "int(plugin.pk): plugin.set_base_attr(p) p.save() for old in current_plugins: if old.pk ==", "created the placeholders/ get them instead try: placeholders[pk] = page.placeholders.get(slot=placeholder.slot)", "obj): from reversion.models import Version version = get_object_or_404(Version, pk=version_id) revs", "plugin in cms_plugin_list: # connect plugins to the correct placeholder", "current_plugins = list(CMSPlugin.objects.filter(placeholder__page=page)) for pk, placeholder in placeholders.items(): # admin", "for plugin in cms_plugin_list: # connect plugins to the correct", "plugin.placeholder = placeholders[plugin.placeholder_id] plugin.save(no_signals=True) for plugin in cms_plugin_list: plugin.save() for", "plugin.save(no_signals=True) for plugin in cms_plugin_list: plugin.save() for p in plugin_list:", "[] others = [] page = obj lang = get_language_from_request(request)", "elif obj.__class__ == Title: titles.append(obj) else: others.append(rev) if not page.has_change_permission(request):", "the placeholders/ get them instead try: placeholders[pk] = page.placeholders.get(slot=placeholder.slot) except", "for rev in revs: obj = rev.object if obj.__class__ ==", "placeholders = {} plugin_list = [] titles = [] others", "page.placeholders.get(slot=placeholder.slot) except Placeholder.DoesNotExist: placeholders[pk].save() page.placeholders.add(placeholders[pk]) for plugin in cms_plugin_list: #", "has already created the placeholders/ get them instead try: placeholders[pk]", "[] placeholders = {} plugin_list = [] titles = []", "obj = rev.object if obj.__class__ == Placeholder: placeholders[obj.pk] = obj", "int(p.cmsplugin_ptr_id) == int(plugin.pk): plugin.set_base_attr(p) p.save() for old in current_plugins: if", "cms.models import Page, Title, CMSPlugin, Placeholder from cms.utils import get_language_from_request", "CMSPlugin: cms_plugin_list.append(obj) elif hasattr(obj, 'cmsplugin_ptr_id'): plugin_list.append(obj) elif obj.__class__ == Page:", "elif hasattr(obj, 'cmsplugin_ptr_id'): plugin_list.append(obj) elif obj.__class__ == Page: pass #page", "for old in current_plugins: if old.pk == plugin.pk: plugin.save() current_plugins.remove(old)", "plugin.pk: plugin.save() current_plugins.remove(old) for title in titles: title.page = page", "current_plugins.remove(old) for title in titles: title.page = page try: title.save()", "obj.__class__ == CMSPlugin: cms_plugin_list.append(obj) elif hasattr(obj, 'cmsplugin_ptr_id'): plugin_list.append(obj) elif obj.__class__", "version.revision.version_set.all()] cms_plugin_list = [] placeholders = {} plugin_list = []", "= [] placeholders = {} plugin_list = [] titles =", "from django.shortcuts import get_object_or_404 def revert_plugins(request, version_id, obj): from reversion.models", "obj lang = get_language_from_request(request) for rev in revs: obj =", "== Placeholder: placeholders[obj.pk] = obj if obj.__class__ == CMSPlugin: cms_plugin_list.append(obj)", "cms_plugin_list.append(obj) elif hasattr(obj, 'cmsplugin_ptr_id'): plugin_list.append(obj) elif obj.__class__ == Page: pass", "plugins to the correct placeholder plugin.placeholder = placeholders[plugin.placeholder_id] plugin.save(no_signals=True) for", "= get_language_from_request(request) for rev in revs: obj = rev.object if", "= placeholders[plugin.placeholder_id] plugin.save(no_signals=True) for plugin in cms_plugin_list: plugin.save() for p", "import get_language_from_request from django.http import Http404 from 
django.shortcuts import get_object_or_404", "get_object_or_404 def revert_plugins(request, version_id, obj): from reversion.models import Version version", "cms_plugin_list: plugin.save() for p in plugin_list: if int(p.cmsplugin_ptr_id) == int(plugin.pk):", "obj.__class__ == Page: pass #page = obj #Page.objects.get(pk=obj.pk) elif obj.__class__", "for title in titles: title.page = page try: title.save() except:", "plugin.save() for p in plugin_list: if int(p.cmsplugin_ptr_id) == int(plugin.pk): plugin.set_base_attr(p)", "in cms_plugin_list: # connect plugins to the correct placeholder plugin.placeholder", "in version.revision.version_set.all()] cms_plugin_list = [] placeholders = {} plugin_list =", "= [related_version.object_version for related_version in version.revision.version_set.all()] cms_plugin_list = [] placeholders", "page = obj lang = get_language_from_request(request) for rev in revs:", "= page try: title.save() except: title.pk = Title.objects.get(page=page, language=title.language).pk title.save()", "version_id, obj): from reversion.models import Version version = get_object_or_404(Version, pk=version_id)", "not page.has_change_permission(request): raise Http404 current_plugins = list(CMSPlugin.objects.filter(placeholder__page=page)) for pk, placeholder", "for pk, placeholder in placeholders.items(): # admin has already created", "plugin.set_base_attr(p) p.save() for old in current_plugins: if old.pk == plugin.pk:", "p in plugin_list: if int(p.cmsplugin_ptr_id) == int(plugin.pk): plugin.set_base_attr(p) p.save() for", "from cms.models import Page, Title, CMSPlugin, Placeholder from cms.utils import", "correct placeholder plugin.placeholder = placeholders[plugin.placeholder_id] plugin.save(no_signals=True) for plugin in cms_plugin_list:", "import Http404 from django.shortcuts import get_object_or_404 def revert_plugins(request, version_id, obj):", "page.has_change_permission(request): raise Http404 current_plugins = list(CMSPlugin.objects.filter(placeholder__page=page)) for pk, placeholder in", "def revert_plugins(request, version_id, obj): from reversion.models import Version version =", "#page = obj #Page.objects.get(pk=obj.pk) elif obj.__class__ == Title: titles.append(obj) else:", "current_plugins: if old.pk == plugin.pk: plugin.save() current_plugins.remove(old) for title in", "Page, Title, CMSPlugin, Placeholder from cms.utils import get_language_from_request from django.http", "= get_object_or_404(Version, pk=version_id) revs = [related_version.object_version for related_version in version.revision.version_set.all()]", "for p in plugin_list: if int(p.cmsplugin_ptr_id) == int(plugin.pk): plugin.set_base_attr(p) p.save()", "revs: obj = rev.object if obj.__class__ == Placeholder: placeholders[obj.pk] =", "#Page.objects.get(pk=obj.pk) elif obj.__class__ == Title: titles.append(obj) else: others.append(rev) if not", "obj if obj.__class__ == CMSPlugin: cms_plugin_list.append(obj) elif hasattr(obj, 'cmsplugin_ptr_id'): plugin_list.append(obj)", "titles: title.page = page try: title.save() except: title.pk = Title.objects.get(page=page,", "pk, placeholder in placeholders.items(): # admin has already created the", "get them instead try: placeholders[pk] = page.placeholders.get(slot=placeholder.slot) except Placeholder.DoesNotExist: placeholders[pk].save()", "title.save() for other in others: other.object.save() for plugin in current_plugins:", "= list(CMSPlugin.objects.filter(placeholder__page=page)) for pk, placeholder in placeholders.items(): # admin has", "elif obj.__class__ 
== Page: pass #page = obj #Page.objects.get(pk=obj.pk) elif", "# connect plugins to the correct placeholder plugin.placeholder = placeholders[plugin.placeholder_id]", "= obj lang = get_language_from_request(request) for rev in revs: obj", "if old.pk == plugin.pk: plugin.save() current_plugins.remove(old) for title in titles:", "== CMSPlugin: cms_plugin_list.append(obj) elif hasattr(obj, 'cmsplugin_ptr_id'): plugin_list.append(obj) elif obj.__class__ ==", "Http404 current_plugins = list(CMSPlugin.objects.filter(placeholder__page=page)) for pk, placeholder in placeholders.items(): #", "-*- coding: utf-8 -*- from cms.models import Page, Title, CMSPlugin,", "import get_object_or_404 def revert_plugins(request, version_id, obj): from reversion.models import Version", "reversion.models import Version version = get_object_or_404(Version, pk=version_id) revs = [related_version.object_version", "related_version in version.revision.version_set.all()] cms_plugin_list = [] placeholders = {} plugin_list", "cms_plugin_list: # connect plugins to the correct placeholder plugin.placeholder =", "'cmsplugin_ptr_id'): plugin_list.append(obj) elif obj.__class__ == Page: pass #page = obj", "in cms_plugin_list: plugin.save() for p in plugin_list: if int(p.cmsplugin_ptr_id) ==", "plugin_list = [] titles = [] others = [] page", "placeholder in placeholders.items(): # admin has already created the placeholders/", "coding: utf-8 -*- from cms.models import Page, Title, CMSPlugin, Placeholder", "lang = get_language_from_request(request) for rev in revs: obj = rev.object", "Version version = get_object_or_404(Version, pk=version_id) revs = [related_version.object_version for related_version", "in placeholders.items(): # admin has already created the placeholders/ get", "if int(p.cmsplugin_ptr_id) == int(plugin.pk): plugin.set_base_attr(p) p.save() for old in current_plugins:", "title in titles: title.page = page try: title.save() except: title.pk", "import Version version = get_object_or_404(Version, pk=version_id) revs = [related_version.object_version for", "title.page = page try: title.save() except: title.pk = Title.objects.get(page=page, language=title.language).pk", "others = [] page = obj lang = get_language_from_request(request) for", "== plugin.pk: plugin.save() current_plugins.remove(old) for title in titles: title.page =", "Title.objects.get(page=page, language=title.language).pk title.save() for other in others: other.object.save() for plugin", "revert_plugins(request, version_id, obj): from reversion.models import Version version = get_object_or_404(Version,", "# admin has already created the placeholders/ get them instead", "utf-8 -*- from cms.models import Page, Title, CMSPlugin, Placeholder from", "from cms.utils import get_language_from_request from django.http import Http404 from django.shortcuts", "obj #Page.objects.get(pk=obj.pk) elif obj.__class__ == Title: titles.append(obj) else: others.append(rev) if", "others.append(rev) if not page.has_change_permission(request): raise Http404 current_plugins = list(CMSPlugin.objects.filter(placeholder__page=page)) for", "= obj if obj.__class__ == CMSPlugin: cms_plugin_list.append(obj) elif hasattr(obj, 'cmsplugin_ptr_id'):", "pass #page = obj #Page.objects.get(pk=obj.pk) elif obj.__class__ == Title: titles.append(obj)", "page try: title.save() except: title.pk = Title.objects.get(page=page, language=title.language).pk title.save() for", "import Page, Title, CMSPlugin, Placeholder from cms.utils import get_language_from_request from", "plugin_list: if 
# ===== django-cms: restore plugins, placeholders and titles from a reversion revision =====
# -*- coding: utf-8 -*-
from cms.models import Page, Title, CMSPlugin, Placeholder
from cms.utils import get_language_from_request
from django.http import Http404
from django.shortcuts import get_object_or_404


def revert_plugins(request, version_id, obj):
    from reversion.models import Version
    version = get_object_or_404(Version, pk=version_id)
    revs = [related_version.object_version for related_version in version.revision.version_set.all()]
    cms_plugin_list = []
    placeholders = {}
    plugin_list = []
    titles = []
    others = []
    page = obj
    lang = get_language_from_request(request)
    # sort the revision's related objects by type
    for rev in revs:
        obj = rev.object
        if obj.__class__ == Placeholder:
            placeholders[obj.pk] = obj
        if obj.__class__ == CMSPlugin:
            cms_plugin_list.append(obj)
        elif hasattr(obj, 'cmsplugin_ptr_id'):
            plugin_list.append(obj)
        elif obj.__class__ == Page:
            pass
            #page = obj #Page.objects.get(pk=obj.pk)
        elif obj.__class__ == Title:
            titles.append(obj)
        else:
            others.append(rev)
    if not page.has_change_permission(request):
        raise Http404
    current_plugins = list(CMSPlugin.objects.filter(placeholder__page=page))
    for pk, placeholder in placeholders.items():
        # admin has already created the placeholders/ get them instead
        try:
            placeholders[pk] = page.placeholders.get(slot=placeholder.slot)
        except Placeholder.DoesNotExist:
            placeholders[pk].save()
            page.placeholders.add(placeholders[pk])
    for plugin in cms_plugin_list:
        # connect plugins to the correct placeholder
        plugin.placeholder = placeholders[plugin.placeholder_id]
        plugin.save(no_signals=True)
    for plugin in cms_plugin_list:
        plugin.save()
        for p in plugin_list:
            if int(p.cmsplugin_ptr_id) == int(plugin.pk):
                plugin.set_base_attr(p)
                p.save()
        for old in current_plugins:
            if old.pk == plugin.pk:
                plugin.save()
                current_plugins.remove(old)
    for title in titles:
        title.page = page
        try:
            title.save()
        except:
            title.pk = Title.objects.get(page=page, language=title.language).pk
            title.save()
    for other in others:
        other.object.save()
    # anything still listed here was not part of the revision: drop it
    for plugin in current_plugins:
        plugin.delete()
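# A minimal sketch (not from the original file) of how revert_plugins might be
# called from a wrapper view; the view name and URL parameters are assumptions.
from django.shortcuts import get_object_or_404, redirect
from cms.models import Page

def revert_page(request, page_id, version_id):   # hypothetical wrapper view
    page = get_object_or_404(Page, pk=page_id)
    revert_plugins(request, version_id, page)    # raises Http404 if the user
                                                 # lacks change permission
    return redirect(page.get_absolute_url())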
[ "转化为int64 # print(label) label_gen_p = pd.DataFrame(label_gen_dict) label_gen_p.to_csv(gpath + 'label_delete.csv', index=False)", "aa <= 40: print(aa) path, imagename = os.path.split(imagelist[aa]) im =", "+ 'delete/' + 'idetail_' + imagename) gen_number += 1 label_dict", "'label_delete.csv', index=False) # label_gen_p = pd.DataFrame(label_gen_dict) # label_gen_p.to_csv(gpath + 'label_gen.csv',", "label = label.append(label_gen_pd) # 将生成的图片label加入原先的label # label['label'] = label[['label']].astype('int64') #", "# 转化为int64 # print(label) label_gen_p = pd.DataFrame(label_gen_dict) label_gen_p.to_csv(gpath + 'label_delete.csv',", "range(len(imagelist)): if aa <= 20: print(aa) path, imagename = os.path.split(imagelist[aa])", "= pd.DataFrame(label_dict) # label = label.append(label_gen_pd) # 将生成的图片label加入原先的label # label['label']", "label_gen = [], [] # for imagefile in imagelist: for", "path, imagename = os.path.split(imagelist[aa]) im = Image.open(imagelist[aa]) im = im.convert('RGB')", "{'img_path':img_path_gen, 'label':label_gen} label_gen_dict['img_path'].extend(img_path_gen) label_gen_dict['label'].extend(label_gen) label_gen_pd = pd.DataFrame(label_dict) # label =", "index=False) print('训练集总共生成%d个图片'%gen_number) if datatype == 'valid': gen_number = 0 if", "# im_detail = im.filter(ImageFilter.DETAIL) # 细节增强 img_path_gen.append(gpath + 'delete/' +'idetail_'+imagename)", "if aa <= 20: print(aa) path, imagename = os.path.split(imagelist[aa]) im", "+'idetail_'+imagename) gen_number += 1 label_dict = {'img_path':img_path_gen, 'label':label_gen} label_gen_dict['img_path'].extend(img_path_gen) label_gen_dict['label'].extend(label_gen)", "datatype == 'train': gen_number = 0 # 统计生成的图片数量 if not", "im_detail.save(gpath + 'delete/' +'idetail_'+imagename) gen_number += 1 label_dict = {'img_path':img_path_gen,", "= {'img_path': img_path_gen, 'label': label_gen} label_gen_dict['img_path'].extend(img_path_gen) label_gen_dict['label'].extend(label_gen) label_gen_pd = pd.DataFrame(label_dict)", "<= 20: print(aa) path, imagename = os.path.split(imagelist[aa]) im = Image.open(imagelist[aa])", "'label_gen.csv', index=False) print('训练集总共生成%d个图片'%gen_number) if datatype == 'valid': gen_number = 0", "plt import matplotlib.image as mpimg # import seaborn as sns", "+ 'delete/' + 'idetail_' + imagename) label_gen.extend([int(i)]) im_detail.save(gpath + 'delete/'", "= label[['label']].astype('int64') # 转化为int64 # print(label) label_gen_p = pd.DataFrame(label_gen_dict) label_gen_p.to_csv(gpath", "= {'img_path':[], 'label':[]} for i in range(61): li = label[label['label']", "as sns import pandas as pd import numpy as np", "imagelist: for aa in range(len(imagelist)): if aa <= 20: print(aa)", "+ imagename) label_gen.extend([int(i)]) im_detail.save(gpath + 'delete/' + 'idetail_' + imagename)", "= pd.DataFrame(label_gen_dict) label_gen_p.to_csv(gpath + 'label_delete.csv', index=False) print('验证集总共生成%d个图片'%gen_number) if __name__ ==", "'label_delete.csv', index=False) print('验证集总共生成%d个图片'%gen_number) if __name__ == '__main__': genImage(train_path, 'train') genImage(valid_path,", "label_gen.extend([int(i)]) im_detail.save(gpath + 'delete/' +'idetail_'+imagename) gen_number += 1 label_dict =", "label_gen.extend([int(i)]) im_detail.save(gpath + 'delete/' + 'idetail_' + imagename) gen_number +=", "i in range(61): li = label[label['label'] == i] imagenum =", "= label.append(label_gen_pd) # 将生成的图片label加入原先的label # label['label'] = label[['label']].astype('int64') # 转化为int64", "aa in range(len(imagelist)): if aa <= 40: 
print(aa) path, imagename", "label_gen_dict = {'img_path':[], 'label':[]} # 生成图片label for i in range(61):", "# label_gen_p = pd.DataFrame(label_gen_dict) # label_gen_p.to_csv(gpath + 'label_gen.csv', index=False) print('训练集总共生成%d个图片'%gen_number)", "img_path_gen, 'label': label_gen} label_gen_dict['img_path'].extend(img_path_gen) label_gen_dict['label'].extend(label_gen) label_gen_pd = pd.DataFrame(label_dict) # label", "print(label) label_gen_p = pd.DataFrame(label_gen_dict) label_gen_p.to_csv(gpath + 'label_delete.csv', index=False) print('验证集总共生成%d个图片'%gen_number) if", "'label.csv') label_gen_dict = {'img_path':[], 'label':[]} # 生成图片label for i in", "im_detail.save(gpath + 'delete/' + 'idetail_' + imagename) gen_number += 1", "pd.read_csv(gpath + 'label.csv') label_gen_dict = {'img_path':[], 'label':[]} # 生成图片label for", "for aa in range(len(imagelist)): if aa <= 40: print(aa) path,", "import seaborn as sns import pandas as pd import numpy", "细节增强 img_path_gen.append(gpath + 'delete/' +'idetail_'+imagename) label_gen.extend([int(i)]) im_detail.save(gpath + 'delete/' +'idetail_'+imagename)", "label_gen_pd = pd.DataFrame(label_dict) # label = label.append(label_gen_pd) # 将生成的图片label加入原先的label #", "valid_path = './AgriculturalDisease_validationset/' def genImage(gpath, datatype): if datatype == 'train':", "imagefile in imagelist: for aa in range(len(imagelist)): if aa <=", "40: print(aa) path, imagename = os.path.split(imagelist[aa]) im = Image.open(imagelist[aa]) im", "统计生成的图片数量 if not os.path.exists(gpath+'delete'): os.makedirs(gpath+'delete') label = pd.read_csv(gpath + 'label.csv')", "'./AgriculturalDisease_trainingset/' valid_path = './AgriculturalDisease_validationset/' def genImage(gpath, datatype): if datatype ==", "in range(61): li = label[label['label'] == i] imagenum = li['label'].count()", "= label[label['label'] == i] imagenum = li['label'].count() print('第%d个,总共有有%d个图片'%(i, imagenum)) imagelist", "label.append(label_gen_pd) # 将生成的图片label加入原先的label # label['label'] = label[['label']].astype('int64') # 转化为int64 #", "'delete/' +'idetail_'+imagename) gen_number += 1 label_dict = {'img_path':img_path_gen, 'label':label_gen} label_gen_dict['img_path'].extend(img_path_gen)", "# 将生成的图片label加入原先的label # label['label'] = label[['label']].astype('int64') # 转化为int64 # print(label)", "= Image.open(imagelist[aa]) im = im.convert('RGB') im_detail = im.transpose(Image.ROTATE_180) #im_detail =", "im_detail = im.transpose(Image.ROTATE_180) #im_detail = im.filter(ImageFilter.DETAIL) # 细节增强 img_path_gen.append(gpath +", "+ 'label_gen.csv', index=False) print('训练集总共生成%d个图片'%gen_number) if datatype == 'valid': gen_number =", "im.transpose(Image.ROTATE_180) # im_detail = im.filter(ImageFilter.DETAIL) # 细节增强 img_path_gen.append(gpath + 'delete/'", "np.array(li['img_path']).tolist() img_path_gen, label_gen = [], [] # for imagefile in", "将生成的图片label加入原先的label # label['label'] = label[['label']].astype('int64') # 转化为int64 # print(label) label_gen_p", "<= 40: print(aa) path, imagename = os.path.split(imagelist[aa]) im = Image.open(imagelist[aa])", "1 label_dict = {'img_path':img_path_gen, 'label':label_gen} label_gen_dict['img_path'].extend(img_path_gen) label_gen_dict['label'].extend(label_gen) label_gen_pd = pd.DataFrame(label_dict)", "'label':[]} for i in range(61): li = label[label['label'] == i]", "for aa in range(len(imagelist)): if aa <= 20: print(aa) path,", "gen_number += 1 label_dict = {'img_path': img_path_gen, 'label': label_gen} label_gen_dict['img_path'].extend(img_path_gen)", "os from PIL 
import Image, ImageFilter import matplotlib.pyplot as plt", "if datatype == 'train': gen_number = 0 # 统计生成的图片数量 if", "for imagefile in imagelist: for aa in range(len(imagelist)): if aa", "{'img_path':[], 'label':[]} # 生成图片label for i in range(61): li =", "seaborn as sns import pandas as pd import numpy as", "= os.path.split(imagelist[aa]) im = Image.open(imagelist[aa]) im = im.convert('RGB') im_detail =", "+ 'label_delete.csv', index=False) # label_gen_p = pd.DataFrame(label_gen_dict) # label_gen_p.to_csv(gpath +", "# 细节增强 img_path_gen.append(gpath + 'delete/' + 'idetail_' + imagename) label_gen.extend([int(i)])", "range(len(imagelist)): if aa <= 40: print(aa) path, imagename = os.path.split(imagelist[aa])", "label_gen} label_gen_dict['img_path'].extend(img_path_gen) label_gen_dict['label'].extend(label_gen) label_gen_pd = pd.DataFrame(label_dict) # label = label.append(label_gen_pd)", "im.transpose(Image.ROTATE_180) #im_detail = im.filter(ImageFilter.DETAIL) # 细节增强 img_path_gen.append(gpath + 'delete/' +", "'label.csv') label_gen_dict = {'img_path':[], 'label':[]} for i in range(61): li", "= im.convert('RGB') im_detail = im.transpose(Image.ROTATE_180) # im_detail = im.filter(ImageFilter.DETAIL) #", "# print(label) label_gen_p = pd.DataFrame(label_gen_dict) label_gen_p.to_csv(gpath + 'label_delete.csv', index=False) #", "imagelist: for aa in range(len(imagelist)): if aa <= 40: print(aa)", "# label_gen_p.to_csv(gpath + 'label_gen.csv', index=False) print('训练集总共生成%d个图片'%gen_number) if datatype == 'valid':", "import matplotlib.image as mpimg # import seaborn as sns import", "'train': gen_number = 0 # 统计生成的图片数量 if not os.path.exists(gpath+'delete'): os.makedirs(gpath+'delete')", "in imagelist: for aa in range(len(imagelist)): if aa <= 20:", "li['label'].count() print('第%d个,总共有有%d个图片'%(i, imagenum)) imagelist = np.array(li['img_path']).tolist() img_path_gen, label_gen = [],", "= pd.read_csv(gpath + 'label.csv') label_gen_dict = {'img_path':[], 'label':[]} for i", "label['label'] = label[['label']].astype('int64') # 转化为int64 # print(label) label_gen_p = pd.DataFrame(label_gen_dict)", "'label': label_gen} label_gen_dict['img_path'].extend(img_path_gen) label_gen_dict['label'].extend(label_gen) label_gen_pd = pd.DataFrame(label_dict) # label =", "pd.DataFrame(label_dict) # label = label.append(label_gen_pd) # 将生成的图片label加入原先的label # label['label'] =", "label_gen_dict['img_path'].extend(img_path_gen) label_gen_dict['label'].extend(label_gen) label_gen_pd = pd.DataFrame(label_dict) # label = label.append(label_gen_pd) #", "imagenum)) imagelist = np.array(li['img_path']).tolist() img_path_gen, label_gen = [], [] #", "im.filter(ImageFilter.DETAIL) # 细节增强 img_path_gen.append(gpath + 'delete/' + 'idetail_' + imagename)", "20: print(aa) path, imagename = os.path.split(imagelist[aa]) im = Image.open(imagelist[aa]) im", "in imagelist: for aa in range(len(imagelist)): if aa <= 40:", "label_gen_dict['label'].extend(label_gen) label_gen_pd = pd.DataFrame(label_dict) # label = label.append(label_gen_pd) # 将生成的图片label加入原先的label", "def genImage(gpath, datatype): if datatype == 'train': gen_number = 0", "# 细节增强 img_path_gen.append(gpath + 'delete/' +'idetail_'+imagename) label_gen.extend([int(i)]) im_detail.save(gpath + 'delete/'", "import Image, ImageFilter import matplotlib.pyplot as plt import matplotlib.image as", "imagenum = li['label'].count() print('第%d个,总共有有%d个图片'%(i, imagenum)) imagelist = np.array(li['img_path']).tolist() img_path_gen, label_gen", "= im.transpose(Image.ROTATE_180) #im_detail = 
im.filter(ImageFilter.DETAIL) # 细节增强 img_path_gen.append(gpath + 'delete/'", "{'img_path':[], 'label':[]} for i in range(61): li = label[label['label'] ==", "genImage(gpath, datatype): if datatype == 'train': gen_number = 0 #", "+= 1 label_dict = {'img_path': img_path_gen, 'label': label_gen} label_gen_dict['img_path'].extend(img_path_gen) label_gen_dict['label'].extend(label_gen)", "im_detail = im.filter(ImageFilter.DETAIL) # 细节增强 img_path_gen.append(gpath + 'delete/' +'idetail_'+imagename) label_gen.extend([int(i)])", "im = im.convert('RGB') im_detail = im.transpose(Image.ROTATE_180) # im_detail = im.filter(ImageFilter.DETAIL)", "+ 'idetail_' + imagename) label_gen.extend([int(i)]) im_detail.save(gpath + 'delete/' + 'idetail_'", "ImageFilter import matplotlib.pyplot as plt import matplotlib.image as mpimg #", "= im.filter(ImageFilter.DETAIL) # 细节增强 img_path_gen.append(gpath + 'delete/' +'idetail_'+imagename) label_gen.extend([int(i)]) im_detail.save(gpath", "import os from PIL import Image, ImageFilter import matplotlib.pyplot as", "# 统计生成的图片数量 if not os.path.exists(gpath+'delete'): os.makedirs(gpath+'delete') label = pd.read_csv(gpath +", "+ 'idetail_' + imagename) gen_number += 1 label_dict = {'img_path':", "= np.array(li['img_path']).tolist() img_path_gen, label_gen = [], [] # for imagefile", "+ imagename) gen_number += 1 label_dict = {'img_path': img_path_gen, 'label':", "== i] imagenum = li['label'].count() print('第%d个,总共有有%d个图片'%(i, imagenum)) imagelist = np.array(li['img_path']).tolist()", "PIL import Image, ImageFilter import matplotlib.pyplot as plt import matplotlib.image", "matplotlib.image as mpimg # import seaborn as sns import pandas", "label = pd.read_csv(gpath + 'label.csv') label_gen_dict = {'img_path':[], 'label':[]} for", "label[['label']].astype('int64') # 转化为int64 # print(label) label_gen_p = pd.DataFrame(label_gen_dict) label_gen_p.to_csv(gpath +", "= Image.open(imagelist[aa]) im = im.convert('RGB') im_detail = im.transpose(Image.ROTATE_180) # im_detail", "'delete/' +'idetail_'+imagename) label_gen.extend([int(i)]) im_detail.save(gpath + 'delete/' +'idetail_'+imagename) gen_number += 1", "imagename) gen_number += 1 label_dict = {'img_path': img_path_gen, 'label': label_gen}", "matplotlib.pyplot as plt import matplotlib.image as mpimg # import seaborn", "print('训练集总共生成%d个图片'%gen_number) if datatype == 'valid': gen_number = 0 if not", "# 生成图片label for i in range(61): li = label[label['label'] ==", "'delete/' + 'idetail_' + imagename) gen_number += 1 label_dict =", "0 if not os.path.exists(gpath+'delete'): os.makedirs(gpath+'delete') label = pd.read_csv(gpath + 'label.csv')", "from PIL import Image, ImageFilter import matplotlib.pyplot as plt import", "'valid': gen_number = 0 if not os.path.exists(gpath+'delete'): os.makedirs(gpath+'delete') label =", "+ 'delete/' +'idetail_'+imagename) label_gen.extend([int(i)]) im_detail.save(gpath + 'delete/' +'idetail_'+imagename) gen_number +=", "#im_detail = im.filter(ImageFilter.DETAIL) # 细节增强 img_path_gen.append(gpath + 'delete/' + 'idetail_'", "= im.convert('RGB') im_detail = im.transpose(Image.ROTATE_180) #im_detail = im.filter(ImageFilter.DETAIL) # 细节增强", "== 'train': gen_number = 0 # 统计生成的图片数量 if not os.path.exists(gpath+'delete'):", "pd.read_csv(gpath + 'label.csv') label_gen_dict = {'img_path':[], 'label':[]} for i in", "gen_number = 0 if not os.path.exists(gpath+'delete'): os.makedirs(gpath+'delete') label = pd.read_csv(gpath", "aa in range(len(imagelist)): if aa <= 20: print(aa) path, imagename", 
"label_gen_p.to_csv(gpath + 'label_delete.csv', index=False) print('验证集总共生成%d个图片'%gen_number) if __name__ == '__main__': genImage(train_path,", "imagename = os.path.split(imagelist[aa]) im = Image.open(imagelist[aa]) im = im.convert('RGB') im_detail", "im_detail = im.transpose(Image.ROTATE_180) # im_detail = im.filter(ImageFilter.DETAIL) # 细节增强 img_path_gen.append(gpath", "label = pd.read_csv(gpath + 'label.csv') label_gen_dict = {'img_path':[], 'label':[]} #", "im.convert('RGB') im_detail = im.transpose(Image.ROTATE_180) # im_detail = im.filter(ImageFilter.DETAIL) # 细节增强", "'delete/' + 'idetail_' + imagename) label_gen.extend([int(i)]) im_detail.save(gpath + 'delete/' +", "# for imagefile in imagelist: for aa in range(len(imagelist)): if", "pd.DataFrame(label_gen_dict) label_gen_p.to_csv(gpath + 'label_delete.csv', index=False) # label_gen_p = pd.DataFrame(label_gen_dict) #", "index=False) # label_gen_p = pd.DataFrame(label_gen_dict) # label_gen_p.to_csv(gpath + 'label_gen.csv', index=False)", "'idetail_' + imagename) label_gen.extend([int(i)]) im_detail.save(gpath + 'delete/' + 'idetail_' +", "gen_number = 0 # 统计生成的图片数量 if not os.path.exists(gpath+'delete'): os.makedirs(gpath+'delete') label", "label_dict = {'img_path': img_path_gen, 'label': label_gen} label_gen_dict['img_path'].extend(img_path_gen) label_gen_dict['label'].extend(label_gen) label_gen_pd =", "pandas as pd import numpy as np import random train_path", "range(61): li = label[label['label'] == i] imagenum = li['label'].count() print('第%d个,总共有有%d个图片'%(i,", "imagelist = np.array(li['img_path']).tolist() img_path_gen, label_gen = [], [] # for", "== 'valid': gen_number = 0 if not os.path.exists(gpath+'delete'): os.makedirs(gpath+'delete') label", "im.convert('RGB') im_detail = im.transpose(Image.ROTATE_180) #im_detail = im.filter(ImageFilter.DETAIL) # 细节增强 img_path_gen.append(gpath", "= pd.read_csv(gpath + 'label.csv') label_gen_dict = {'img_path':[], 'label':[]} # 生成图片label", "gen_number += 1 label_dict = {'img_path':img_path_gen, 'label':label_gen} label_gen_dict['img_path'].extend(img_path_gen) label_gen_dict['label'].extend(label_gen) label_gen_pd", "im = Image.open(imagelist[aa]) im = im.convert('RGB') im_detail = im.transpose(Image.ROTATE_180) #im_detail", "'./AgriculturalDisease_validationset/' def genImage(gpath, datatype): if datatype == 'train': gen_number =", "import numpy as np import random train_path = './AgriculturalDisease_trainingset/' valid_path", "np import random train_path = './AgriculturalDisease_trainingset/' valid_path = './AgriculturalDisease_validationset/' def", "train_path = './AgriculturalDisease_trainingset/' valid_path = './AgriculturalDisease_validationset/' def genImage(gpath, datatype): if", "li = label[label['label'] == i] imagenum = li['label'].count() print('第%d个,总共有有%d个图片'%(i, imagenum))", "print('第%d个,总共有有%d个图片'%(i, imagenum)) imagelist = np.array(li['img_path']).tolist() img_path_gen, label_gen = [], []", "random train_path = './AgriculturalDisease_trainingset/' valid_path = './AgriculturalDisease_validationset/' def genImage(gpath, datatype):", "import matplotlib.pyplot as plt import matplotlib.image as mpimg # import", "label_dict = {'img_path':img_path_gen, 'label':label_gen} label_gen_dict['img_path'].extend(img_path_gen) label_gen_dict['label'].extend(label_gen) label_gen_pd = pd.DataFrame(label_dict) #", "<reponame>lvwuyunlifan/crop import os from PIL import Image, ImageFilter import matplotlib.pyplot", "im.filter(ImageFilter.DETAIL) # 细节增强 img_path_gen.append(gpath + 'delete/' 
+'idetail_'+imagename) label_gen.extend([int(i)]) im_detail.save(gpath +", "+ 'label.csv') label_gen_dict = {'img_path':[], 'label':[]} for i in range(61):", "as np import random train_path = './AgriculturalDisease_trainingset/' valid_path = './AgriculturalDisease_validationset/'", "# label['label'] = label[['label']].astype('int64') # 转化为int64 # print(label) label_gen_p =", "label_gen_p.to_csv(gpath + 'label_delete.csv', index=False) # label_gen_p = pd.DataFrame(label_gen_dict) # label_gen_p.to_csv(gpath", "1 label_dict = {'img_path': img_path_gen, 'label': label_gen} label_gen_dict['img_path'].extend(img_path_gen) label_gen_dict['label'].extend(label_gen) label_gen_pd", "pd.DataFrame(label_gen_dict) label_gen_p.to_csv(gpath + 'label_delete.csv', index=False) print('验证集总共生成%d个图片'%gen_number) if __name__ == '__main__':", "print(label) label_gen_p = pd.DataFrame(label_gen_dict) label_gen_p.to_csv(gpath + 'label_delete.csv', index=False) # label_gen_p", "mpimg # import seaborn as sns import pandas as pd", "+ 'label_delete.csv', index=False) print('验证集总共生成%d个图片'%gen_number) if __name__ == '__main__': genImage(train_path, 'train')", "im = im.convert('RGB') im_detail = im.transpose(Image.ROTATE_180) #im_detail = im.filter(ImageFilter.DETAIL) #", "os.path.split(imagelist[aa]) im = Image.open(imagelist[aa]) im = im.convert('RGB') im_detail = im.transpose(Image.ROTATE_180)", "+= 1 label_dict = {'img_path':img_path_gen, 'label':label_gen} label_gen_dict['img_path'].extend(img_path_gen) label_gen_dict['label'].extend(label_gen) label_gen_pd =", "= './AgriculturalDisease_validationset/' def genImage(gpath, datatype): if datatype == 'train': gen_number", "if not os.path.exists(gpath+'delete'): os.makedirs(gpath+'delete') label = pd.read_csv(gpath + 'label.csv') label_gen_dict", "not os.path.exists(gpath+'delete'): os.makedirs(gpath+'delete') label = pd.read_csv(gpath + 'label.csv') label_gen_dict =", "sns import pandas as pd import numpy as np import", "+'idetail_'+imagename) label_gen.extend([int(i)]) im_detail.save(gpath + 'delete/' +'idetail_'+imagename) gen_number += 1 label_dict", "as plt import matplotlib.image as mpimg # import seaborn as", "+ 'delete/' +'idetail_'+imagename) gen_number += 1 label_dict = {'img_path':img_path_gen, 'label':label_gen}", "label_gen_p = pd.DataFrame(label_gen_dict) label_gen_p.to_csv(gpath + 'label_delete.csv', index=False) print('验证集总共生成%d个图片'%gen_number) if __name__", "img_path_gen, label_gen = [], [] # for imagefile in imagelist:", "label[label['label'] == i] imagenum = li['label'].count() print('第%d个,总共有有%d个图片'%(i, imagenum)) imagelist =", "= pd.DataFrame(label_gen_dict) # label_gen_p.to_csv(gpath + 'label_gen.csv', index=False) print('训练集总共生成%d个图片'%gen_number) if datatype", "numpy as np import random train_path = './AgriculturalDisease_trainingset/' valid_path =", "img_path_gen.append(gpath + 'delete/' +'idetail_'+imagename) label_gen.extend([int(i)]) im_detail.save(gpath + 'delete/' +'idetail_'+imagename) gen_number", "in range(len(imagelist)): if aa <= 20: print(aa) path, imagename =", "= './AgriculturalDisease_trainingset/' valid_path = './AgriculturalDisease_validationset/' def genImage(gpath, datatype): if datatype", "= li['label'].count() print('第%d个,总共有有%d个图片'%(i, imagenum)) imagelist = np.array(li['img_path']).tolist() img_path_gen, label_gen =", "+ 'label.csv') label_gen_dict = {'img_path':[], 'label':[]} # 生成图片label for i", "'label':label_gen} label_gen_dict['img_path'].extend(img_path_gen) label_gen_dict['label'].extend(label_gen) label_gen_pd = 
pd.DataFrame(label_dict) # label = label.append(label_gen_pd)", "as mpimg # import seaborn as sns import pandas as", "[] # for imagefile in imagelist: for aa in range(len(imagelist)):", "as pd import numpy as np import random train_path =", "Image, ImageFilter import matplotlib.pyplot as plt import matplotlib.image as mpimg", "os.makedirs(gpath+'delete') label = pd.read_csv(gpath + 'label.csv') label_gen_dict = {'img_path':[], 'label':[]}", "im = Image.open(imagelist[aa]) im = im.convert('RGB') im_detail = im.transpose(Image.ROTATE_180) #", "# import seaborn as sns import pandas as pd import", "= {'img_path':img_path_gen, 'label':label_gen} label_gen_dict['img_path'].extend(img_path_gen) label_gen_dict['label'].extend(label_gen) label_gen_pd = pd.DataFrame(label_dict) # label", "生成图片label for i in range(61): li = label[label['label'] == i]", "# label = label.append(label_gen_pd) # 将生成的图片label加入原先的label # label['label'] = label[['label']].astype('int64')", "if datatype == 'valid': gen_number = 0 if not os.path.exists(gpath+'delete'):", "= 0 if not os.path.exists(gpath+'delete'): os.makedirs(gpath+'delete') label = pd.read_csv(gpath +", "label_gen_dict = {'img_path':[], 'label':[]} for i in range(61): li =", "= im.filter(ImageFilter.DETAIL) # 细节增强 img_path_gen.append(gpath + 'delete/' + 'idetail_' +", "label_gen_p = pd.DataFrame(label_gen_dict) label_gen_p.to_csv(gpath + 'label_delete.csv', index=False) # label_gen_p =", "for i in range(61): li = label[label['label'] == i] imagenum", "pd.DataFrame(label_gen_dict) # label_gen_p.to_csv(gpath + 'label_gen.csv', index=False) print('训练集总共生成%d个图片'%gen_number) if datatype ==", "index=False) print('验证集总共生成%d个图片'%gen_number) if __name__ == '__main__': genImage(train_path, 'train') genImage(valid_path, 'valid')", "import random train_path = './AgriculturalDisease_trainingset/' valid_path = './AgriculturalDisease_validationset/' def genImage(gpath,", "# print(label) label_gen_p = pd.DataFrame(label_gen_dict) label_gen_p.to_csv(gpath + 'label_delete.csv', index=False) print('验证集总共生成%d个图片'%gen_number)", "= 0 # 统计生成的图片数量 if not os.path.exists(gpath+'delete'): os.makedirs(gpath+'delete') label =", "label_gen_p = pd.DataFrame(label_gen_dict) # label_gen_p.to_csv(gpath + 'label_gen.csv', index=False) print('训练集总共生成%d个图片'%gen_number) if", "import pandas as pd import numpy as np import random", "= [], [] # for imagefile in imagelist: for aa", "i] imagenum = li['label'].count() print('第%d个,总共有有%d个图片'%(i, imagenum)) imagelist = np.array(li['img_path']).tolist() img_path_gen,", "= pd.DataFrame(label_gen_dict) label_gen_p.to_csv(gpath + 'label_delete.csv', index=False) # label_gen_p = pd.DataFrame(label_gen_dict)", "os.path.exists(gpath+'delete'): os.makedirs(gpath+'delete') label = pd.read_csv(gpath + 'label.csv') label_gen_dict = {'img_path':[],", "细节增强 img_path_gen.append(gpath + 'delete/' + 'idetail_' + imagename) label_gen.extend([int(i)]) im_detail.save(gpath", "0 # 统计生成的图片数量 if not os.path.exists(gpath+'delete'): os.makedirs(gpath+'delete') label = pd.read_csv(gpath", "img_path_gen.append(gpath + 'delete/' + 'idetail_' + imagename) label_gen.extend([int(i)]) im_detail.save(gpath +", "in range(len(imagelist)): if aa <= 40: print(aa) path, imagename =", "= {'img_path':[], 'label':[]} # 生成图片label for i in range(61): li", "Image.open(imagelist[aa]) im = im.convert('RGB') im_detail = im.transpose(Image.ROTATE_180) # im_detail =", "imagename) label_gen.extend([int(i)]) im_detail.save(gpath + 'delete/' + 'idetail_' + imagename) gen_number", "{'img_path': 
img_path_gen, 'label': label_gen} label_gen_dict['img_path'].extend(img_path_gen) label_gen_dict['label'].extend(label_gen) label_gen_pd = pd.DataFrame(label_dict) #", "datatype): if datatype == 'train': gen_number = 0 # 统计生成的图片数量", "datatype == 'valid': gen_number = 0 if not os.path.exists(gpath+'delete'): os.makedirs(gpath+'delete')", "pd import numpy as np import random train_path = './AgriculturalDisease_trainingset/'", "'label':[]} # 生成图片label for i in range(61): li = label[label['label']", "label_gen_p.to_csv(gpath + 'label_gen.csv', index=False) print('训练集总共生成%d个图片'%gen_number) if datatype == 'valid': gen_number", "aa <= 20: print(aa) path, imagename = os.path.split(imagelist[aa]) im =", "= im.transpose(Image.ROTATE_180) # im_detail = im.filter(ImageFilter.DETAIL) # 细节增强 img_path_gen.append(gpath +", "if aa <= 40: print(aa) path, imagename = os.path.split(imagelist[aa]) im", "print(aa) path, imagename = os.path.split(imagelist[aa]) im = Image.open(imagelist[aa]) im =", "'idetail_' + imagename) gen_number += 1 label_dict = {'img_path': img_path_gen,", "[], [] # for imagefile in imagelist: for aa in", "Image.open(imagelist[aa]) im = im.convert('RGB') im_detail = im.transpose(Image.ROTATE_180) #im_detail = im.filter(ImageFilter.DETAIL)" ]
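# (Sketch, not from the original repo.) The loops above hard-code one
# 180-degree rotation with an 'idetail_' prefix; a table of PIL operations is
# an easy way to register further augmentations. All names here are illustrative.
import os
from PIL import Image, ImageFilter, ImageEnhance

AUGMENTATIONS = {
    'idetail_': lambda im: im.transpose(Image.ROTATE_180),
    'iblur_':   lambda im: im.filter(ImageFilter.GaussianBlur(radius=2)),
    'ibright_': lambda im: ImageEnhance.Brightness(im).enhance(1.3),
}

def augment_one(image_path, out_dir):
    # apply every registered operation and save under a prefixed file name
    imagename = os.path.basename(image_path)
    im = Image.open(image_path).convert('RGB')
    saved = []
    for prefix, op in AUGMENTATIONS.items():
        out_path = os.path.join(out_dir, prefix + imagename)
        op(im).save(out_path)
        saved.append(out_path)
    return saved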
[ "中文字体 chFont = font_manager.FontProperties(family=\"SimHei\") # SimHei # chFont = font_manager.FontProperties(fname=\"C:/Windows/Fonts/SIMHEI.TTF\")", "- - - - - - - - - -", "range(60)] xLabels += [\"11点,{}分\".format(i) for i in range(60)] plt.xticks(list(x)[::step], xLabels[::step],", "plt from matplotlib import rc from matplotlib import font_manager import", "xLabels[::step], rotation=25, fontProperties=chFont) # 添加描述信息 plt.xlabel(\"时间\", fontProperties=chFont) plt.ylabel(\"温度 单位(℃)\", fontProperties=chFont)", "y = [random.randint(20, 35) for i in range(120)] plt.figure(figsize=(20, 8),", "xLabels += [\"11点,{}分\".format(i) for i in range(60)] plt.xticks(list(x)[::step], xLabels[::step], rotation=25,", "font_manager.FontProperties(fname=\"C:/Windows/Fonts/SIMHEI.TTF\") # 刻度相关设置 step = 10 xLabels = [\"10点,{}分\".format(i) for", "# 添加描述信息 plt.xlabel(\"时间\", fontProperties=chFont) plt.ylabel(\"温度 单位(℃)\", fontProperties=chFont) plt.title(\"10点到12点每分钟的气温变化\", fontProperties=chFont) plt.show()", "= [random.randint(20, 35) for i in range(120)] plt.figure(figsize=(20, 8), dpi=80)", "- - - - # @author like # @since 2021-02-23", "刻度相关设置 step = 10 xLabels = [\"10点,{}分\".format(i) for i in", "# 十点到十二点的气温变化 from matplotlib import pyplot as plt from matplotlib", "# @since 2021-02-23 11:08 # @email <EMAIL> # 十点到十二点的气温变化 from", "= font_manager.FontProperties(family=\"SimHei\") # SimHei # chFont = font_manager.FontProperties(fname=\"C:/Windows/Fonts/SIMHEI.TTF\") # 刻度相关设置", "like # @since 2021-02-23 11:08 # @email <EMAIL> # 十点到十二点的气温变化", "= range(0, 120) y = [random.randint(20, 35) for i in", "120) y = [random.randint(20, 35) for i in range(120)] plt.figure(figsize=(20,", "# 中文字体 chFont = font_manager.FontProperties(family=\"SimHei\") # SimHei # chFont =", "i in range(60)] plt.xticks(list(x)[::step], xLabels[::step], rotation=25, fontProperties=chFont) # 添加描述信息 plt.xlabel(\"时间\",", "11:08 # @email <EMAIL> # 十点到十二点的气温变化 from matplotlib import pyplot", "step = 10 xLabels = [\"10点,{}分\".format(i) for i in range(60)]", "= [\"10点,{}分\".format(i) for i in range(60)] xLabels += [\"11点,{}分\".format(i) for", "35) for i in range(120)] plt.figure(figsize=(20, 8), dpi=80) plt.plot(x, y)", "2021-02-23 11:08 # @email <EMAIL> # 十点到十二点的气温变化 from matplotlib import", "range(120)] plt.figure(figsize=(20, 8), dpi=80) plt.plot(x, y) # 中文字体 chFont =", "8), dpi=80) plt.plot(x, y) # 中文字体 chFont = font_manager.FontProperties(family=\"SimHei\") #", "import font_manager import random x = range(0, 120) y =", "- - - - - - - - - #", "- - - - - - # @author like #", "<EMAIL> # 十点到十二点的气温变化 from matplotlib import pyplot as plt from", "for i in range(120)] plt.figure(figsize=(20, 8), dpi=80) plt.plot(x, y) #", "- - - - - # @author like # @since", "range(0, 120) y = [random.randint(20, 35) for i in range(120)]", "[random.randint(20, 35) for i in range(120)] plt.figure(figsize=(20, 8), dpi=80) plt.plot(x,", "# - - - - - - - - -", "@email <EMAIL> # 十点到十二点的气温变化 from matplotlib import pyplot as plt", "plt.xticks(list(x)[::step], xLabels[::step], rotation=25, fontProperties=chFont) # 添加描述信息 plt.xlabel(\"时间\", fontProperties=chFont) plt.ylabel(\"温度 单位(℃)\",", "xLabels = [\"10点,{}分\".format(i) for i in range(60)] xLabels += [\"11点,{}分\".format(i)", "十点到十二点的气温变化 from matplotlib import pyplot as plt from matplotlib import", "i in range(120)] plt.figure(figsize=(20, 8), dpi=80) plt.plot(x, y) # 中文字体", "for i in range(60)] plt.xticks(list(x)[::step], xLabels[::step], rotation=25, fontProperties=chFont) # 添加描述信息", "<gh_stars>1-10 # - - - - - - - -", 
"import random x = range(0, 120) y = [random.randint(20, 35)", "= font_manager.FontProperties(fname=\"C:/Windows/Fonts/SIMHEI.TTF\") # 刻度相关设置 step = 10 xLabels = [\"10点,{}分\".format(i)", "font_manager.FontProperties(family=\"SimHei\") # SimHei # chFont = font_manager.FontProperties(fname=\"C:/Windows/Fonts/SIMHEI.TTF\") # 刻度相关设置 step", "y) # 中文字体 chFont = font_manager.FontProperties(family=\"SimHei\") # SimHei # chFont", "matplotlib import rc from matplotlib import font_manager import random x", "from matplotlib import rc from matplotlib import font_manager import random", "i in range(60)] xLabels += [\"11点,{}分\".format(i) for i in range(60)]", "rotation=25, fontProperties=chFont) # 添加描述信息 plt.xlabel(\"时间\", fontProperties=chFont) plt.ylabel(\"温度 单位(℃)\", fontProperties=chFont) plt.title(\"10点到12点每分钟的气温变化\",", "dpi=80) plt.plot(x, y) # 中文字体 chFont = font_manager.FontProperties(family=\"SimHei\") # SimHei", "- - # @author like # @since 2021-02-23 11:08 #", "in range(60)] xLabels += [\"11点,{}分\".format(i) for i in range(60)] plt.xticks(list(x)[::step],", "@since 2021-02-23 11:08 # @email <EMAIL> # 十点到十二点的气温变化 from matplotlib", "+= [\"11点,{}分\".format(i) for i in range(60)] plt.xticks(list(x)[::step], xLabels[::step], rotation=25, fontProperties=chFont)", "- # @author like # @since 2021-02-23 11:08 # @email", "range(60)] plt.xticks(list(x)[::step], xLabels[::step], rotation=25, fontProperties=chFont) # 添加描述信息 plt.xlabel(\"时间\", fontProperties=chFont) plt.ylabel(\"温度", "from matplotlib import pyplot as plt from matplotlib import rc", "in range(60)] plt.xticks(list(x)[::step], xLabels[::step], rotation=25, fontProperties=chFont) # 添加描述信息 plt.xlabel(\"时间\", fontProperties=chFont)", "import rc from matplotlib import font_manager import random x =", "# SimHei # chFont = font_manager.FontProperties(fname=\"C:/Windows/Fonts/SIMHEI.TTF\") # 刻度相关设置 step =", "pyplot as plt from matplotlib import rc from matplotlib import", "# chFont = font_manager.FontProperties(fname=\"C:/Windows/Fonts/SIMHEI.TTF\") # 刻度相关设置 step = 10 xLabels", "- - - - - - - # @author like", "# @author like # @since 2021-02-23 11:08 # @email <EMAIL>", "matplotlib import pyplot as plt from matplotlib import rc from", "chFont = font_manager.FontProperties(fname=\"C:/Windows/Fonts/SIMHEI.TTF\") # 刻度相关设置 step = 10 xLabels =", "# @email <EMAIL> # 十点到十二点的气温变化 from matplotlib import pyplot as", "plt.figure(figsize=(20, 8), dpi=80) plt.plot(x, y) # 中文字体 chFont = font_manager.FontProperties(family=\"SimHei\")", "chFont = font_manager.FontProperties(family=\"SimHei\") # SimHei # chFont = font_manager.FontProperties(fname=\"C:/Windows/Fonts/SIMHEI.TTF\") #", "import pyplot as plt from matplotlib import rc from matplotlib", "rc from matplotlib import font_manager import random x = range(0,", "@author like # @since 2021-02-23 11:08 # @email <EMAIL> #", "fontProperties=chFont) # 添加描述信息 plt.xlabel(\"时间\", fontProperties=chFont) plt.ylabel(\"温度 单位(℃)\", fontProperties=chFont) plt.title(\"10点到12点每分钟的气温变化\", fontProperties=chFont)", "as plt from matplotlib import rc from matplotlib import font_manager", "from matplotlib import font_manager import random x = range(0, 120)", "in range(120)] plt.figure(figsize=(20, 8), dpi=80) plt.plot(x, y) # 中文字体 chFont", "SimHei # chFont = font_manager.FontProperties(fname=\"C:/Windows/Fonts/SIMHEI.TTF\") # 刻度相关设置 step = 10", "matplotlib import font_manager import random x = range(0, 120) y", "- - - # @author like # @since 2021-02-23 11:08", "[\"10点,{}分\".format(i) for i in range(60)] xLabels += [\"11点,{}分\".format(i) 
for i", "for i in range(60)] xLabels += [\"11点,{}分\".format(i) for i in", "random x = range(0, 120) y = [random.randint(20, 35) for", "= 10 xLabels = [\"10点,{}分\".format(i) for i in range(60)] xLabels", "[\"11点,{}分\".format(i) for i in range(60)] plt.xticks(list(x)[::step], xLabels[::step], rotation=25, fontProperties=chFont) #", "plt.plot(x, y) # 中文字体 chFont = font_manager.FontProperties(family=\"SimHei\") # SimHei #", "10 xLabels = [\"10点,{}分\".format(i) for i in range(60)] xLabels +=", "font_manager import random x = range(0, 120) y = [random.randint(20,", "x = range(0, 120) y = [random.randint(20, 35) for i", "# 刻度相关设置 step = 10 xLabels = [\"10点,{}分\".format(i) for i", "- - - - - - - - # @author" ]
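# (Sketch.) Passing fontProperties to every call works, but the same effect can
# be configured once per script via rcParams; this assumes a 'SimHei' font is
# installed and visible to matplotlib.
import matplotlib.pyplot as plt

plt.rcParams['font.family'] = 'SimHei'        # Chinese-capable font everywhere
plt.rcParams['axes.unicode_minus'] = False    # render the minus sign correctly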
[ "data = dataset(\"clt\") canvas = regression.init() isoline = canvas.createisoline() isoline.label=\"y\"", "dataset(\"clt\") canvas = regression.init() isoline = canvas.createisoline() isoline.label=\"y\" texts=[] colors", "# Now set isoline.linecolors and test again. canvas.clear() isoline.linecolors =", "isoline.text = texts # First test using isoline.text[...].color canvas.plot(data, isoline,", "canvas.plot(data, isoline, bg=1) baselineImage = \"%s%d%s\"%(baseline[0], 3, baseline[1]) testImage =", "texts.append(text.name) else: texts.append(text) isoline.text = texts # First test using", "test again. canvas.clear() isoline.textcolors = colors canvas.plot(data, isoline, bg=1) baselineImage", "ret = regression.run_wo_terminate(canvas, \"test_vcs_isoline_labels.png\", baselineImage) # Now set isoline.linecolors and", "0: texts.append(text.name) else: texts.append(text) isoline.text = texts # First test", "baselineImage = \"%s%d%s\"%(baseline[0], 3, baseline[1]) testImage = os.path.abspath(\"test_vcs_isoline_labels3.png\") ret +=", "= \"%s%d%s\"%(baseline[0], 3, baseline[1]) testImage = os.path.abspath(\"test_vcs_isoline_labels3.png\") ret += regression.run_wo_terminate(canvas,", "isoline, bg=1) baseline = os.path.splitext(sys.argv[1]) baselineImage = \"%s%s\"%baseline ret =", "texts=[] colors = [] for i in range(10): text =", "= texts # First test using isoline.text[...].color canvas.plot(data, isoline, bg=1)", "texts.append(text) isoline.text = texts # First test using isoline.text[...].color canvas.plot(data,", "and test again. canvas.clear() isoline.linecolors = colors canvas.plot(data, isoline, bg=1)", "i text.height = 12 colors.append(100 + 12 * i) if", "i) if i%2 == 0: texts.append(text.name) else: texts.append(text) isoline.text =", "if i%2 == 0: texts.append(text.name) else: texts.append(text) isoline.text = texts", "set isoline.linecolors and test again. canvas.clear() isoline.linecolors = colors canvas.plot(data,", "[] for i in range(10): text = canvas.createtext() text.color =", "= [] for i in range(10): text = canvas.createtext() text.color", "isoline.label=\"y\" texts=[] colors = [] for i in range(10): text", "else: texts.append(text) isoline.text = texts # First test using isoline.text[...].color", "isoline, bg=1) baselineImage = \"%s%d%s\"%(baseline[0], 2, baseline[1]) testImage = os.path.abspath(\"test_vcs_isoline_labels2.png\")", "text.color = 50 + 12 * i text.height = 12", "baseline = os.path.splitext(sys.argv[1]) baselineImage = \"%s%s\"%baseline ret = regression.run_wo_terminate(canvas, \"test_vcs_isoline_labels.png\",", "= 50 + 12 * i text.height = 12 colors.append(100", "testImage = os.path.abspath(\"test_vcs_isoline_labels2.png\") ret += regression.run_wo_terminate(canvas, testImage, baselineImage) # Now", "12 colors.append(100 + 12 * i) if i%2 == 0:", "canvas.createisoline() isoline.label=\"y\" texts=[] colors = [] for i in range(10):", "= dataset(\"clt\") canvas = regression.init() isoline = canvas.createisoline() isoline.label=\"y\" texts=[]", "text = canvas.createtext() text.color = 50 + 12 * i", "regression.run_wo_terminate(canvas, testImage, baselineImage) # Now set isoline.textcolors and test again.", "baselineImage) # Now set isoline.linecolors and test again. 
canvas.clear() isoline.linecolors", "regression dataset = cdms2.open(os.path.join(vcs.sample_data,\"clt.nc\")) data = dataset(\"clt\") canvas = regression.init()", "colors canvas.plot(data, isoline, bg=1) baselineImage = \"%s%d%s\"%(baseline[0], 2, baseline[1]) testImage", "canvas.clear() isoline.linecolors = colors canvas.plot(data, isoline, bg=1) baselineImage = \"%s%d%s\"%(baseline[0],", "test using isoline.text[...].color canvas.plot(data, isoline, bg=1) baseline = os.path.splitext(sys.argv[1]) baselineImage", "baseline[1]) testImage = os.path.abspath(\"test_vcs_isoline_labels2.png\") ret += regression.run_wo_terminate(canvas, testImage, baselineImage) #", "using isoline.text[...].color canvas.plot(data, isoline, bg=1) baseline = os.path.splitext(sys.argv[1]) baselineImage =", "os.path.splitext(sys.argv[1]) baselineImage = \"%s%s\"%baseline ret = regression.run_wo_terminate(canvas, \"test_vcs_isoline_labels.png\", baselineImage) #", "baselineImage = \"%s%s\"%baseline ret = regression.run_wo_terminate(canvas, \"test_vcs_isoline_labels.png\", baselineImage) # Now", "and test again. canvas.clear() isoline.textcolors = colors canvas.plot(data, isoline, bg=1)", "+ 12 * i text.height = 12 colors.append(100 + 12", "baselineImage) # Now set isoline.textcolors and test again. canvas.clear() isoline.textcolors", "again. canvas.clear() isoline.textcolors = colors canvas.plot(data, isoline, bg=1) baselineImage =", "colors = [] for i in range(10): text = canvas.createtext()", "bg=1) baselineImage = \"%s%d%s\"%(baseline[0], 3, baseline[1]) testImage = os.path.abspath(\"test_vcs_isoline_labels3.png\") ret", "= \"%s%d%s\"%(baseline[0], 2, baseline[1]) testImage = os.path.abspath(\"test_vcs_isoline_labels2.png\") ret += regression.run_wo_terminate(canvas,", "= os.path.abspath(\"test_vcs_isoline_labels2.png\") ret += regression.run_wo_terminate(canvas, testImage, baselineImage) # Now set", "== 0: texts.append(text.name) else: texts.append(text) isoline.text = texts # First", "sys, cdms2, vcs, vcs.testing.regression as regression dataset = cdms2.open(os.path.join(vcs.sample_data,\"clt.nc\")) data", "= regression.run_wo_terminate(canvas, \"test_vcs_isoline_labels.png\", baselineImage) # Now set isoline.linecolors and test", "again. canvas.clear() isoline.linecolors = colors canvas.plot(data, isoline, bg=1) baselineImage =", "in range(10): text = canvas.createtext() text.color = 50 + 12", "3, baseline[1]) testImage = os.path.abspath(\"test_vcs_isoline_labels3.png\") ret += regression.run_wo_terminate(canvas, testImage, baselineImage)", "= 12 colors.append(100 + 12 * i) if i%2 ==", "regression.init() isoline = canvas.createisoline() isoline.label=\"y\" texts=[] colors = [] for", "# First test using isoline.text[...].color canvas.plot(data, isoline, bg=1) baseline =", "= cdms2.open(os.path.join(vcs.sample_data,\"clt.nc\")) data = dataset(\"clt\") canvas = regression.init() isoline =", "First test using isoline.text[...].color canvas.plot(data, isoline, bg=1) baseline = os.path.splitext(sys.argv[1])", "cdms2.open(os.path.join(vcs.sample_data,\"clt.nc\")) data = dataset(\"clt\") canvas = regression.init() isoline = canvas.createisoline()", "texts # First test using isoline.text[...].color canvas.plot(data, isoline, bg=1) baseline", "= colors canvas.plot(data, isoline, bg=1) baselineImage = \"%s%d%s\"%(baseline[0], 2, baseline[1])", "dataset = cdms2.open(os.path.join(vcs.sample_data,\"clt.nc\")) data = dataset(\"clt\") canvas = regression.init() isoline", "# Now set isoline.textcolors and test again. 
canvas.clear() isoline.textcolors =", "test again. canvas.clear() isoline.linecolors = colors canvas.plot(data, isoline, bg=1) baselineImage", "= canvas.createisoline() isoline.label=\"y\" texts=[] colors = [] for i in", "canvas.plot(data, isoline, bg=1) baselineImage = \"%s%d%s\"%(baseline[0], 2, baseline[1]) testImage =", "baseline[1]) testImage = os.path.abspath(\"test_vcs_isoline_labels3.png\") ret += regression.run_wo_terminate(canvas, testImage, baselineImage) sys.exit(ret)", "isoline.textcolors = colors canvas.plot(data, isoline, bg=1) baselineImage = \"%s%d%s\"%(baseline[0], 3,", "range(10): text = canvas.createtext() text.color = 50 + 12 *", "bg=1) baseline = os.path.splitext(sys.argv[1]) baselineImage = \"%s%s\"%baseline ret = regression.run_wo_terminate(canvas,", "import os, sys, cdms2, vcs, vcs.testing.regression as regression dataset =", "canvas.createtext() text.color = 50 + 12 * i text.height =", "text.height = 12 colors.append(100 + 12 * i) if i%2", "2, baseline[1]) testImage = os.path.abspath(\"test_vcs_isoline_labels2.png\") ret += regression.run_wo_terminate(canvas, testImage, baselineImage)", "isoline.textcolors and test again. canvas.clear() isoline.textcolors = colors canvas.plot(data, isoline,", "= colors canvas.plot(data, isoline, bg=1) baselineImage = \"%s%d%s\"%(baseline[0], 3, baseline[1])", "12 * i text.height = 12 colors.append(100 + 12 *", "\"test_vcs_isoline_labels.png\", baselineImage) # Now set isoline.linecolors and test again. canvas.clear()", "Now set isoline.linecolors and test again. canvas.clear() isoline.linecolors = colors", "i in range(10): text = canvas.createtext() text.color = 50 +", "\"%s%d%s\"%(baseline[0], 3, baseline[1]) testImage = os.path.abspath(\"test_vcs_isoline_labels3.png\") ret += regression.run_wo_terminate(canvas, testImage,", "i%2 == 0: texts.append(text.name) else: texts.append(text) isoline.text = texts #", "as regression dataset = cdms2.open(os.path.join(vcs.sample_data,\"clt.nc\")) data = dataset(\"clt\") canvas =", "+ 12 * i) if i%2 == 0: texts.append(text.name) else:", "regression.run_wo_terminate(canvas, \"test_vcs_isoline_labels.png\", baselineImage) # Now set isoline.linecolors and test again.", "canvas.clear() isoline.textcolors = colors canvas.plot(data, isoline, bg=1) baselineImage = \"%s%d%s\"%(baseline[0],", "+= regression.run_wo_terminate(canvas, testImage, baselineImage) # Now set isoline.textcolors and test", "* i) if i%2 == 0: texts.append(text.name) else: texts.append(text) isoline.text", "vcs, vcs.testing.regression as regression dataset = cdms2.open(os.path.join(vcs.sample_data,\"clt.nc\")) data = dataset(\"clt\")", "isoline = canvas.createisoline() isoline.label=\"y\" texts=[] colors = [] for i", "= canvas.createtext() text.color = 50 + 12 * i text.height", "cdms2, vcs, vcs.testing.regression as regression dataset = cdms2.open(os.path.join(vcs.sample_data,\"clt.nc\")) data =", "os.path.abspath(\"test_vcs_isoline_labels2.png\") ret += regression.run_wo_terminate(canvas, testImage, baselineImage) # Now set isoline.textcolors", "testImage, baselineImage) # Now set isoline.textcolors and test again. 
canvas.clear()", "bg=1) baselineImage = \"%s%d%s\"%(baseline[0], 2, baseline[1]) testImage = os.path.abspath(\"test_vcs_isoline_labels2.png\") ret", "= \"%s%s\"%baseline ret = regression.run_wo_terminate(canvas, \"test_vcs_isoline_labels.png\", baselineImage) # Now set", "colors canvas.plot(data, isoline, bg=1) baselineImage = \"%s%d%s\"%(baseline[0], 3, baseline[1]) testImage", "ret += regression.run_wo_terminate(canvas, testImage, baselineImage) # Now set isoline.textcolors and", "for i in range(10): text = canvas.createtext() text.color = 50", "50 + 12 * i text.height = 12 colors.append(100 +", "isoline.linecolors = colors canvas.plot(data, isoline, bg=1) baselineImage = \"%s%d%s\"%(baseline[0], 2,", "baselineImage = \"%s%d%s\"%(baseline[0], 2, baseline[1]) testImage = os.path.abspath(\"test_vcs_isoline_labels2.png\") ret +=", "set isoline.textcolors and test again. canvas.clear() isoline.textcolors = colors canvas.plot(data,", "\"%s%d%s\"%(baseline[0], 2, baseline[1]) testImage = os.path.abspath(\"test_vcs_isoline_labels2.png\") ret += regression.run_wo_terminate(canvas, testImage,", "\"%s%s\"%baseline ret = regression.run_wo_terminate(canvas, \"test_vcs_isoline_labels.png\", baselineImage) # Now set isoline.linecolors", "canvas = regression.init() isoline = canvas.createisoline() isoline.label=\"y\" texts=[] colors =", "Now set isoline.textcolors and test again. canvas.clear() isoline.textcolors = colors", "= os.path.splitext(sys.argv[1]) baselineImage = \"%s%s\"%baseline ret = regression.run_wo_terminate(canvas, \"test_vcs_isoline_labels.png\", baselineImage)", "vcs.testing.regression as regression dataset = cdms2.open(os.path.join(vcs.sample_data,\"clt.nc\")) data = dataset(\"clt\") canvas", "colors.append(100 + 12 * i) if i%2 == 0: texts.append(text.name)", "isoline, bg=1) baselineImage = \"%s%d%s\"%(baseline[0], 3, baseline[1]) testImage = os.path.abspath(\"test_vcs_isoline_labels3.png\")", "canvas.plot(data, isoline, bg=1) baseline = os.path.splitext(sys.argv[1]) baselineImage = \"%s%s\"%baseline ret", "= regression.init() isoline = canvas.createisoline() isoline.label=\"y\" texts=[] colors = []", "os, sys, cdms2, vcs, vcs.testing.regression as regression dataset = cdms2.open(os.path.join(vcs.sample_data,\"clt.nc\"))", "12 * i) if i%2 == 0: texts.append(text.name) else: texts.append(text)", "* i text.height = 12 colors.append(100 + 12 * i)", "isoline.text[...].color canvas.plot(data, isoline, bg=1) baseline = os.path.splitext(sys.argv[1]) baselineImage = \"%s%s\"%baseline", "isoline.linecolors and test again. canvas.clear() isoline.linecolors = colors canvas.plot(data, isoline," ]
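# (Sketch.) Judging from its use above, regression.run_wo_terminate() renders
# the canvas, compares the produced PNG against the stored baseline and returns
# a nonzero code on mismatch; for two same-format PNGs the core idea can be
# approximated with numpy alone.
import numpy as np
import matplotlib.image as mpimg

def images_differ(test_png, baseline_png, tol=1e-3):
    a = mpimg.imread(test_png)
    b = mpimg.imread(baseline_png)
    # different shapes always count as a mismatch
    return a.shape != b.shape or float(np.abs(a - b).mean()) > tol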
[ "svm.SVC(kernel='linear'), \\ # random_state=None, n_estimators = 100 ) model =", "y_pred_all[:, 0] target = y_pred_te y_pred = f_quantileNorm(templete, target) else:", "y_pred = y_pred_te y_pred_all[:, i+1] = y_pred return y_pred_all #", "[err_mat, models] = f_err_mat(X_tr, y_tr, clus, model) [dec_mat, dec_ixs] =", "None): ''' % the random walk algorithm. % A is", "Euclidean distance matrix') # --------------------------------------------------------------- aRank_k_neighbors = np.ceil(np.log10(len(sim)) ) ori_graph", "version ''' y = y.flatten() y_pred = np.zeros(y.size) skf =", "scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['X'] # 11:mesothelioma #y = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['y'] n_clus = 3 clus", "np.zeros(y.size) y_pred.fill(np.nan) y_pred_multi[:, j] = y_pred else: if len(yj) <=", "data # start from the second col, the result is", "model, w, s) y_pred_ICE = f_ICE_pred(X_tr, y_tr, X_te, clus, dec_ixs,", "copy.deepcopy(model) model_inner.fit(X_tr, y_tr) return model_inner def f_te(X_te, model): y_pred =", "np.fill_diagonal(rw_net, True) clus = [] for i in range(0, k):", "one cluster. # % This updated version also outputs the", "go back. % p0: the initial probability. usually it is", "for soft % clustering. If we just random pickup some", "0]]) # #f_sim_2_aRankNet(sim,1) #f_sim_2_aRankNet(sim,2) #f_sim_2_aRankNet(sim,3) # #array([[False, True, True, False],", "a tmp array a stores y tmp = np.matlib.repmat(y.reshape((y.size, 1)),", "[] inst_clus = f_tfs_2_instClus(tfs) for i in range(0, len(err_mat)): #", "else: y_pred_te = np.zeros(len(X_te)) y_pred_te.fill(np.nan) #except NotFittedError as e: #", "scipy.io.loadmat(pa+'/data/data_all_pickle/37/data.mat')['y'] #imgplot = plt.imshow(ori_graph, interpolation='nearest', aspect='auto') #plt.show() #sim = np.corrcoef(X)", "y, clus): # ''' # Calculate the decision table #", "j in range(0, len(X_te) ): # for each testing instance", "y): # #print(\"TRAIN:\", train_index, \"TEST:\", test_index) # X_tr, X_te =", "= f_err_mat(X_tr, y_tr, clus, model) # ******************************************************* [dec_mat, dec_ixs] =", "list( np.where(tfs[i, :] ) [0] ) inst_clus.append(row) return inst_clus #", "cluster # the last col stores the pred by whole", "model) #if np.unique(yj).size != 1: model_a_clust.fit(Xj, yj) y_pred = model_a_clust.predict_proba(X_other)", "= plt.imshow(ori_graph, interpolation='nearest', aspect='auto') #plt.show() #sim = np.corrcoef(X) #np.fill_diagonal(sim, 0)", "test_index) # X_tr, X_te = X[train_index], X[test_index] # y_tr, y_te", "#print 'whole model good ' # start from the second", "second col, the result is by each cluster for i", "j in range(1, 50): for j in range(1, 49): try:", "0 0 0.5000 0 % 0 0 0 0 0", "matrix') # --------------------------------------------------------------- aRank_k_neighbors = np.ceil(np.log10(len(sim)) ) ori_graph = f_sim_2_aRankNet(sim,", "---------- find all neighbors' picks ---------- clus_ids_to_use = [] nei_labels", "need to convert clus to a true-false matrix. ''' if", "Matlab def f_sim_2_aRankNet(sim, k=3): ''' % Convert the similarity matrix", "the decision table corresponds to f_dec_tab_4_bg_svm() in Matlab version '''", "% set the diagnal to 1 np.fill_diagonal(rw_net, True) clus =", "not good for fuzzy clusters. 
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Mar  5 05:47:03 2018

@author: zg
"""

import numpy as np
#from scipy import io
import scipy.io
#import pickle
from sklearn.model_selection import StratifiedKFold
#import sklearn
from scipy.sparse import spdiags
from scipy.spatial import distance
#import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingClassifier
from sklearn import svm
#from sklearn import metrics
from sklearn.metrics import roc_auc_score
from sklearn import tree
import copy
import numpy.matlib
from sklearn.exceptions import NotFittedError
#import FuzzyRwrBagging as frb
#from joblib import Parallel, delayed
#import multiprocessing

def RWR(A, nSteps, laziness, p0 = None):
    '''
    % the random walk algorithm.
    % A is the input net matrix, with the diag to be 0.
    % nSteps: how many steps to walk
    % laziness: the probability to go back.
    % p0: the initial probability. usually it is a zero matrix with the diag to
    %   be 1.
    %
    % for example, A could be:
    % A = [0,2,2,0,0,0,0;...
    %      2,0,1,1,0,0,0;...
    %      2,1,0,0,1,0,0;...
    %      0,1,0,0,0,1,1;...
    %      0,0,1,0,0,0,0;...
    %      0,0,0,1,0,0,1;...
    %      0,0,0,1,0,1,0]
    %
    % if nSteps is 1000 and laziness is 0.3, p0 is default, the result is:
    % [0.449, 0.207, 0.220, 0.064, 0.154, 0.034, 0.034;...
    %  0.207, 0.425, 0.167, 0.132, 0.117, 0.071, 0.071;...
    %  0.220, 0.167, 0.463, 0.052, 0.324, 0.028, 0.028;...
    %  0.048, 0.099, 0.039, 0.431, 0.027, 0.232, 0.232;...
    %  0.038, 0.029, 0.081, 0.009, 0.356, 0.004, 0.004;...
    %  0.017, 0.035, 0.014, 0.154, 0.009, 0.425, 0.203;...
    %  0.017, 0.035, 0.014, 0.154, 0.009, 0.203, 0.425]
    %
    % Each column represents the propability for each node. each element in the
    % column means the probability to go to that node.
    % This algorithm will converge. For example, for the above matrix, nSteps =
    % 100, 1000 or 10000, will give the same result.
    '''
    n = len(A)
    if p0 is None:
        p0 = np.eye(n)

    '''
    % In the example above, spdiags(sum(A)'.^(-1), 0, n, n) will be
    % 0.2500      0      0      0      0      0      0
    %      0 0.2500      0      0      0      0      0
    %      0      0 0.2500      0      0      0      0
    %      0      0      0 0.3333      0      0      0
    %      0      0      0      0 1.0000      0      0
    %      0      0      0      0      0 0.5000      0
    %      0      0      0      0      0      0 0.5000
    % W will be:
    %      0 0.5000 0.5000      0      0      0      0
    % 0.5000      0 0.2500 0.3333      0      0      0
    % 0.5000 0.2500      0      0 1.0000      0      0
    %      0 0.2500      0      0      0 0.5000 0.5000
    %      0      0 0.2500      0      0      0      0
    %      0      0      0 0.3333      0      0 0.5000
    %      0      0      0 0.3333      0 0.5000      0
    '''
    #W = A * spdiags(sum(A)'.^(-1), 0, n, n);
    #W = spdiags(np.power(sum(np.float64(A)) , -1).T , 0, n, n).toarray()
    W = A.dot( spdiags(np.power(sum(np.float64(A)) , -1)[np.newaxis], \
                       0, n, n).toarray() )
    p = p0
    pl2norm = np.inf
    unchanged = 0
    for i in range(1, nSteps+1):
        if i % 100 == 0:
            print('      done rwr ' + str(i-1) )
        pnew = (1-laziness) * W.dot(p) + laziness * p0
        l2norm = max(np.sqrt(sum((pnew - p) ** 2) ) )
        p = pnew
        if l2norm < np.finfo(float).eps:
            break
        else:
            if l2norm == pl2norm:
                unchanged = unchanged + 1
                if unchanged > 10:
                    break
            else:
                unchanged = 0
                pl2norm = l2norm
    return p

# test RWR()
'''
A = np.array([[0,2,2,0,0,0,0],\
              [2,0,1,1,0,0,0],\
              [2,1,0,0,1,0,0],\
              [0,1,0,0,0,1,1],\
              [0,0,1,0,0,0,0],\
              [0,0,0,1,0,0,1],\
              [0,0,0,1,0,1,0]])
nSteps = 1000
lazi = 0.3
RWR(A, nSteps, lazi, None)
'''
# test
#dst = distance.euclidean(A)
# correct, the same result as the Matlab version
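# Sanity check (not from the original script): W is column-normalized, so each
# column of the converged RWR matrix is a probability vector. The docstring
# example agrees: 0.449+0.207+0.220+0.048+0.038+0.017+0.017 is ~1.
'''
p = RWR(A, nSteps, lazi)   # A, nSteps, lazi as in the test block above
print( p.sum(axis=0) )     # every entry should be close to 1.0
'''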
def f_sim_2_aRankNet(sim, k=3):
    '''
    % Convert the similarity matrix to a network graph where each node
    % has k edges to other nodes (aRank).
    '''
    # delete the diagonal values.
    # sim = sim-diag(diag(sim) );
    np.fill_diagonal(sim, 0)

    # [~, I] = sort(sim-diag(diag(sim) ) );
    I = np.argsort(sim, kind='mergesort') + 1

    # [~, I2] = sort(I);
    I2 = (np.argsort(I, kind='mergesort').T + 1).T

    # for every column, just keep the top k edges.
    #aRankNet = (I2 > length(sim)-k);
    aRankNet = I2 > (len(sim) - k)

    # make it a symmetric matrix
    # aRankNet = max(aRankNet, aRankNet');
    aRankNet = np.logical_or(aRankNet, aRankNet.T)

    # remove the diagonal 1s.
    # aRankNet = aRankNet-diag(diag(aRankNet) );
    np.fill_diagonal(aRankNet, False)

    return aRankNet

# test
#sim = np.array([[0, 0.5566, 0.6448, 0.3289], \
#                [0.5566, 0, -0.0842, -0.0170], \
#                [0.6448, -0.0842, 0, 0.8405], \
#                [0.3289, -0.0170, 0.8405, 0]])
#
#f_sim_2_aRankNet(sim,1)
#f_sim_2_aRankNet(sim,2)
#f_sim_2_aRankNet(sim,3)
#
#array([[False,  True,  True, False],
#       [ True, False, False, False],
#       [ True, False, False,  True],
#       [False, False,  True, False]])
#
#array([[False,  True,  True,  True],
#       [ True, False, False, False],
#       [ True, False, False,  True],
#       [ True, False,  True, False]])
#
#array([[False,  True,  True,  True],
#       [ True, False, False,  True],
#       [ True, False, False,  True],
#       [ True,  True,  True, False]])
def f_find_centers_rwMat(rw_mat, k):
    '''
    % on the rw_mat matrix, find some nodes as the centroids for soft
    % clustering. If we just random pickup some nodes as centroids, that is
    % not good for fuzzy clusters.
    % k is the number of centroids.
    '''
    ixs = []
    # 1. find the most connected center node as the first centroid.
    a = np.sum(rw_mat, axis=1) # axis=1 for rows; 0 for col
    # % most connected node.
    ix = np.argmax(a)
    ixs.append(ix)

    # 2. iteratively find the rest nodes
    for i in range(1, k):
        tmp = rw_mat[:, ixs]
        b = np.sum(tmp, axis=1)
        b[ixs] = np.inf

        # % find the farthest node
        ix = np.argmin(b)
        ixs.append(ix)
    return ixs

# test
#tmp = f_find_centers_rwMat(rw_mat, 10)

def getCutoff(rw_mat, avgNeighborsSize):
    tmp = rw_mat.flatten('F')
    a = np.flip(np.sort(tmp), 0)
    len1 = len(rw_mat)
    #cutoffs = []
    all_neibs = int( avgNeighborsSize * len1 )
    print( all_neibs)
    ct = a[all_neibs]
    return ct

#test
#>>> a = np.array([[1,2], [3,4]])
#>>> a.flatten()
#array([1, 2, 3, 4])
#>>> a.flatten('F')
#array([1, 3, 2, 4])
'''
a = np.array( range(0,100) )
b = np.matlib.repmat(a, 100, 1)
ct = getCutoff(b, 70)
'''

def f_len_of_each_ele(c1):
    #% Assume c1 is a 1-dimension cell array, and each element is a 1d double
    #% array. This function counts the length of each double array.
    lens = np.zeros(len(c1))
    for i in range(0, len(c1)):
        lens[i] = len(c1[i])
    return lens

def f_eu_dist(X):
    '''
    calculate the euclidean distance between instances
    '''
    sim = np.zeros(( len(X), len(X) ))
    for i in range(0, len(X)):
        for j in range(i+1, len(X)):
            tmp = distance.euclidean(X[i], X[j])
            sim[i][j] = tmp
            sim[j][i] = tmp
    sim = -sim
    np.fill_diagonal(sim, 0)
    return sim
#test
#sim = f_eu_dist(X)

def f_eu_dist2(X1, X2):
    '''
    calculate the euclidean distance between instances from two datasets
    '''
    sim = np.zeros(( len(X1), len(X2) ))
    for i in range(0, len(X1) ):
        for j in range(0, len(X2) ):
            tmp = distance.euclidean(X1[i], X2[j])
            sim[i][j] = tmp
    sim = -sim
    return sim
#test
#sim = f_eu_dist2(X_tr, X_te)
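# Illustrative check (not from the original script): on a toy pair of
# datasets, f_eu_dist2 returns a (len(X1), len(X2)) matrix of negated
# distances, so identical points score 0 and similarity drops with distance.
'''
X1 = np.array([[0.0, 0.0], [1.0, 1.0]])
X2 = np.array([[0.0, 0.0], [3.0, 4.0]])
print( f_eu_dist2(X1, X2) )
# [[ 0.         -5.        ]
#  [-1.41421356 -3.60555128]]
'''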
def f_fuzzy_rwr_clusters(X, k=100, each_clus_sz=None):
    # X: data
    # k: number of clusters
    '''
    The return variable clus stores the instance indices for each cluster.
    However, this data structure is not easy to find for each instance, which
    cluster(s) it belongs to, thus we also need to convert clus to a
    true-false matrix.
    '''
    if each_clus_sz is None:
        # on average, how many clusters does one inst belongs to.
        #overlap_factor = 2;
        # the estimated size of each cluster. default is a third of the
        #   number of instances.
        each_clus_sz = len(X)/3

    print('RWR-based fuzzy clustering starts...')
    print('  NO. clusters = '+str(k)+'; avg. cluster size = '+str(each_clus_sz) )

    # sim = squareform(pdist(X));
    # sim = -sim;
    sim = np.zeros((len(X), len(X) ) )
    for i in range(0, len(X)):
        for j in range(i+1, len(X)):
            tmp = distance.euclidean(X[i], X[j])
            sim[i][j] = tmp
            sim[j][i] = tmp
    sim = -sim
    print('  done calculating the Euclidean distance matrix')

    # ---------------------------------------------------------------
    aRank_k_neighbors = np.ceil(np.log10(len(sim)) )
    ori_graph = f_sim_2_aRankNet(sim, aRank_k_neighbors)
    print('  done calculating the A-rank KNN graph')

    # % -------- RWR --------
    nSteps = 1000
    lazi = 0.3
    rw = RWR(ori_graph, nSteps, lazi)

    # remove probability of returning start node
    np.fill_diagonal(rw, 0)
    rw_mat = rw
    print('  done RWR')

    # ---------------------------------------------------------------
    ixs_centers = f_find_centers_rwMat(rw_mat, k)
    ct = getCutoff(rw_mat, each_clus_sz)
    rw_net = rw_mat > ct
    # % set the diagonal to 1
    np.fill_diagonal(rw_net, True)

    clus = []
    for i in range(0, k):
        tmp = np.argwhere(rw_net[:, ixs_centers[i] ] ).flatten()
        clus.append(tmp)

    # ---------------------------------------------------------------
    # % sort the clusters
    lens = f_len_of_each_ele(clus)
    ix = np.argsort(lens)[::-1]
    clus_ordered = [clus[i] for i in ix]
    print('  center inst. index of each cluster: ')
    ixs_centers = np.array(ixs_centers)
    print(ixs_centers[ix])
    print('  size of each cluster: ')
    print(lens[ix])
    print('  done RWR clustering')
    return clus_ordered

#test
#clus = f_fuzzy_rwr_clusters(X, 100)
# pass

def f_clus_to_tfs(clus, n_inst):
    #% convert the cluster information from cell array to mat. But for each
    #% instance, the rank of clusters information will be lost - you won't know
    #% what is the top 1/2/3 cluster it belongs to.
    #%
    #% clus e.g:
    #%   1x5 cell
    #%   1x195 double  1x193 double  1x169 double  1x161 double  1x62 double
    #%
    #% tfs e.g:
    #%   295x5 double
    #%   1 0 0 0 0
    #%   1 1 1 1 1
    #%   1 0 0 0 0
    #%   1 1 0 0 0
    #%   1 1 1 1 0
    #%   ...
    #%   1 1 1 1 1
    #%   1 0 0 0 0
    #%   1 1 1 0 0
    tfs = np.zeros((n_inst, len(clus)), dtype=bool)
    for i in range(0, len(clus)):
        tfs[clus[i], i] = True
    return tfs

# test
#tfs = f_clus_to_tfs(clus, len(X))
# pass

def f_tfs_2_instClus(tfs):
    '''
    convert the boolean table representation of clustering result to
    for each instance, what clusters it belongs to.
    '''
    inst_clus = []
    for i in range(0, len(tfs)):
        row = list( np.where(tfs[i, :] ) [0] )
        inst_clus.append(row)
    return inst_clus

# test
#inst_clus = f_tfs_2_instClus(tfs)
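# Illustrative round trip (not from the original script): clusters to the
# boolean membership table and back. Instance 1 sits in both clusters, so it
# comes back with [0, 1].
'''
clus_demo = [np.array([0, 1]), np.array([1, 2])]
tfs_demo = f_clus_to_tfs(clus_demo, 3)
print( f_tfs_2_instClus(tfs_demo) )
# [[0], [0, 1], [1]]
'''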
#def f_bg_svm_tr_te(X_tr, y_tr, X_te, y_te):
#    #bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \
#    bagging = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \
#                                random_state=None, n_estimators = 100 )
#    bagging.fit(X_tr, y_tr)
#
#    y_pred = bagging.predict_proba(X_te)
#    y_pred = y_pred[:, 1].flatten()
#
#    auc = roc_auc_score(y_te.flatten(), y_pred)
#    return [y_pred, auc]

# test
'''
X_tr = X
y_tr = y
X_te = X
y_te = y
[y_pred, auc] = f_bg_svm_tr_te(X_tr, y_tr, X_te, y_te)
'''

#def f_bg_tr_te(X_tr, y_tr, X_te, y_te, BaseBagging):
#    '''
#    corresponds to f_weka_bg_svm_tr_te() in Matlab version
#    '''
#    #bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \
#    bagging = BaggingClassifier(BaseBagging, \
#                                random_state=None, n_estimators = 100 )
#    bagging.fit(X_tr, y_tr)
#
#    y_pred = bagging.predict_proba(X_te)
#    y_pred = y_pred[:, 1].flatten()
#
#    auc = roc_auc_score(y_te.flatten(), y_pred)
#    return [y_pred, auc]

def f_tr_te(X_tr, y_tr, X_te, model):
    '''
    corresponds to f_weka_bg_svm_tr_te() in Matlab version
    '''
    #bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \
    #bagging = BaggingClassifier(BaseBagging, \
    #                            random_state=None, n_estimators = 100 )
    model_inner = copy.deepcopy(model)
    model_inner.fit(X_tr, y_tr)
    y_pred = model_inner.predict_proba(X_te)
    y_pred = y_pred[:, 1].flatten()

    #auc = roc_auc_score(y_te.flatten(), y_pred)
    return y_pred

def f_tr(X_tr, y_tr, model):
    model_inner = copy.deepcopy(model)
    model_inner.fit(X_tr, y_tr)
    return model_inner

def f_te(X_te, model):
    y_pred = model.predict_proba(X_te)
    y_pred = y_pred[:, 1].flatten()
    return y_pred

def f_k_fo(X, y, model, k_fold=10):
    '''
    corresponds to f_weka_bg_svm_arff_k_fo_3_parfor() in Matlab version
    '''
    y = y.flatten()
    y_pred = np.zeros(y.size)

    skf = StratifiedKFold(n_splits=k_fold, random_state=None, shuffle=True)
    skf.get_n_splits(X, y)

    for train_index, test_index in skf.split(X, y):
        #print("TRAIN: ", train_index, " TEST: ", test_index)
        X_tr, X_te = X[train_index], X[test_index]
        #y_tr, y_te = y[train_index], y[test_index]
        y_tr = y[train_index]

        if np.unique(y_tr).size == 1:
            y_pred_fo = np.zeros( len(test_index) )
            #print len(X_te)
            #print len(test_index)
            #print y_pred_fo
            y_pred_fo.fill(np.unique(y_tr)[0] )
            #print y_pred_fo
        else:
            y_pred_fo = f_tr_te(X_tr, y_tr, X_te, model)
        y_pred[test_index] = y_pred_fo

    #auc = roc_auc_score(y.flatten(), y_pred)
    return y_pred

# test
#pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/'
##X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast cancer
##y = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['y']
#X = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['X'] # 11:mesothelioma
#y = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['y']
#
#model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
#                          random_state=None, n_estimators = 100 )
#y_pred = f_k_fo(X, y, model, k_fold=10)
#
#print roc_auc_score(y.flatten(), y_pred)
# the easy dataset mesothelioma get 1.0 CV result.
# breast cancer get 0.599
# all results are correct.

def f_quantileNorm(templete, target):
    '''
    Templete is the standard, change the target to the values in the templete.
    Target may have a very different range than the templete.
    templete and target should be 1d n by 1 array.
    f_my_quantileNorm()
    '''
    ix_target = np.argsort(target, kind='mergesort')
    ix_templete = np.argsort(templete, kind='mergesort')
    target[ix_target] = templete[ix_templete]
    new = target
    return new

# test
#templete = X[:, 0]
#target = X[:, 1]
#new = f_quantileNorm(templete, target)
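# Worked example of f_quantileNorm (not from the original script): the target
# keeps its ranks but takes the templete's values, so a target on a very
# different scale is mapped into the templete's range.
'''
templete = np.array([0.1, 0.5, 0.9])
target = np.array([30.0, 10.0, 20.0])
print( f_quantileNorm(templete, target) )
# [0.9 0.1 0.5]  (largest target value gets the largest templete value, etc.)
'''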
#def f_bg_k_fo_3(X, y, k_fold=10):
#    '''
#    corresponds to f_weka_bgSvm_arff_k_fo_3_parfor() in Matlab version
#    % This version 3 adds the support for fuzzy clustering - one instance may
#    %   belong to multiple clusters.
#    % This updated version also outputs the predicted values of y.
#    % support more than 3 clusters
#    '''
#    y_pred = np.zeros((y.size, 1))
#
#    skf = StratifiedKFold(n_splits=k_fold)
#    skf.get_n_splits(X, y)
#
#    for train_index, test_index in skf.split(X, y):
#        #print("TRAIN:", train_index, "TEST:", test_index)
#        X_tr, X_te = X[train_index], X[test_index]
#        y_tr, y_te = y[train_index], y[test_index]

def f_use_each_clus_forWhole(X, y, clus, y_pred_whole, model, fo_inner):
    '''
    % use each cluster data to predict the whole instances, while self
    % prediction using 10-fold CV. corresponds to
    % f_use_each_clus_forWhole_bg_svm() in Matlab version
    '''
    n_clusters = len(clus)
    y_pred_multi = np.zeros((y.size, n_clusters) )
    models = []

    for j in range(0, n_clusters):
        # for each cluster
        Xj = X[clus[j].flatten(), :]
        yj = y[clus[j].flatten() ]
        model_a_clust = copy.deepcopy(model)
        print('  Cluster '+str(j)+' started...')

        #if len(yj) > 10:
        if len(yj) > 15 and np.unique(yj).size != 1:
            # ------------------ for self ------------------
            #if np.unique(yj).size == 1:
            #    y_pred = np.zeros(yj.size)
            #    y_pred.fill(np.unique(yj)[0])
            #else:
            try:
                y_pred = f_k_fo(Xj, yj, model, fo_inner)

                # quantileNorm
                templete = y_pred_whole[clus[j].flatten()]
                target = y_pred
                y_pred = f_quantileNorm(templete, target)

                # copy the normed prediction to the whole data.
                y_pred_multi[clus[j].flatten(), j] = y_pred
                print('    c-'+str(j)+' done predicting local instances')

                # ------------------ for other -----------------
                ix_other = set(range(0, y.size)) - set(clus[j].flatten())
                ix_other = list(ix_other)
                #print ix_other
                X_other = X[ix_other , :]
                #y_other = y[ix_other ]

                # predict
                #y_pred = f_tr_te(Xj, yj, X_other, model)
                #if np.unique(yj).size != 1:
                model_a_clust.fit(Xj, yj)
                y_pred = model_a_clust.predict_proba(X_other)
                y_pred = y_pred[:, 1].flatten()

                # quantileNorm
                templete = y_pred_whole[ix_other]
                target = y_pred
                y_pred = f_quantileNorm(templete, target)
                #else:
                #    y_pred = np.zeros(X_other.size)
                #    y_pred.fill(np.unique(yj)[0])

                # copy to the whole array
                y_pred_multi[ix_other, j] = y_pred
                print('    c-'+str(j)+' done predicting remote instances')
            except ValueError as e:
                print(e)
                print('    skip this cluster')
                y_pred = np.zeros(y.size)
                y_pred.fill(np.nan)
                y_pred_multi[:, j] = y_pred
        else:
            if len(yj) <= 15:
                print ('    '+str(len(yj))+' insts in cluster, <= 15, skip...')
                y_pred = np.zeros(y.size)
                y_pred.fill(np.nan)
                y_pred_multi[:, j] = y_pred

            if np.unique(yj).size == 1:
                print ('    warning, #unique class label(s) == 1')
                y_pred = np.zeros(y.size)
                y_pred.fill(np.unique(yj)[0])
                y_pred_multi[:, j] = y_pred
                model_a_clust = np.unique(yj)[0]
        models.append(model_a_clust)
    return [y_pred_multi, models]

# test
#[y_pred_multi, models] = f_use_each_clus_forWhole(X, y, clus, y_pred_whole, model)

#def f_dec_tab_4_bg_svm(X, y, clus):
#    '''
#    % Calculate the decision table
#    % This version changed from the cluster-cluster dec_mat to instance-cluster
#    %   dec_mat. This solution will avoid the case that if one cluster decision
#    %   is wrong leading entire cluster prediction is wrong, which is the main
#    %   reason of instability. However, we cannot use a systematic evaluation
#    %   criteria such as AUC, I will try using the predicted prob at first.
#    %
#    % This version 3 adds the support for fuzzy clustering - one instance may
#    %   belong to multiple clusters.
#    % This updated version also outputs the predicted values of y.
#    % support more than 3 clusters
#    % normalization take place in y_pred_self and y_pred_other, thus
#    % need normalization when predict y_pred_ICE.
#    % ixsp
#    %
#    % corresponds to f_dec_tab_4_bg_svm() in Matlab version
#    '''
#    #n_clusters = len(clus)
#    ## dec_mat stores the prediction error.
#    #pred_mat=np.zeros((y.size, n_clusters+1)) #the extra col is for whole pred
#
#    ## k_fold of inner cross-validation
#    #fo_inner = 10
#    # --------------------------- WHOLE -------------------------
#    # --------------------------- SELF -------------------------

def f_err_mat(X, y, clus, model):
    '''
    Calculate the decision table
    corresponds to f_dec_tab_4_bg_svm() in Matlab version
    '''
    n_clusters = len(clus)
    # err_mat stores the prediction error.
    pred_prob_mat = np.zeros((y.size, n_clusters+1)) #the extra col is for whole pred

    # col 0 to col n_clusters-1 store the predictions by each cluster
    # the last col stores the pred by whole data
    #models = []

    # k_fold of inner cross-validation
    fo_inner = 5

    # --------------------------- WHOLE -------------------------
    # Predict each cluster using the whole data.
    model_whole = copy.deepcopy(model)
    y_pred_whole = f_k_fo(X, y, model_whole, fo_inner)
    model_whole.fit(X, y) # fit a model using all data rather than only a fold
    pred_prob_mat[:, n_clusters] = y_pred_whole
    print ('  Done evaluation using whole instances')
    print ('  Start to evaluate each cluster ')

    # --------------------------- SELF -------------------------
    # predict the whole instances using each cluster data, while self
    #   prediction using k-fold CV.
    [y_pred_multi, models] = f_use_each_clus_forWhole(X, y, clus, \
                                                      y_pred_whole, model, fo_inner)
    print ('  Done evaluation using each cluster')

    models.append(model_whole)
    pred_prob_mat[:, 0:n_clusters] = y_pred_multi

    # make a tmp array a stores y
    tmp = np.matlib.repmat(y.reshape((y.size, 1)), 1, n_clusters+1)
    err_mat = abs(pred_prob_mat - tmp )

    print ('  Done calculating error table and fitting ICE models')
    return [err_mat, models]

"""
#mat = scipy.io.loadmat('/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/'+\
#                       '3_scripts/2017_4_4/data/names.mat')['names']
#mat = io.loadmat('/Users/zg/Desktop/a.mat')['names']

#test
pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/'
X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast cancer
y = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['y']
#X = scipy.io.loadmat(pa+'/data/data_all_pickle/37/data.mat')['X'] # 37:congress
#y = scipy.io.loadmat(pa+'/data/data_all_pickle/37/data.mat')['y']
#imgplot = plt.imshow(ori_graph, interpolation='nearest', aspect='auto')
#plt.show()
#sim = np.corrcoef(X)
#np.fill_diagonal(sim, 0)
#n_clus = 100
n_clus = 3
clus = f_fuzzy_rwr_clusters(X, n_clus)
tfs = f_clus_to_tfs(clus, len(X))

y = y.astype(float)
#model = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \
#model = BaggingClassifier(base_estimator = svm.LinearSVR(), \
#model = BaggingClassifier(base_estimator = svm.LinearSVC(), \
model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
                          random_state=None, n_estimators = 100 )
[err_mat, models] = f_err_mat(X, y, clus, model)
"""
def f_err_2_decMat(err_mat, tfs, adv_whole=0.4, adv_self=0.5):
    '''
    Convert the err table to decision table.
    '''
    dec_mat = np.zeros(( len(err_mat), err_mat[0].size-1 ), dtype=bool)

    # dec_ixs stores the good cluster(s) for each instance
    dec_ixs = []
    inst_clus = f_tfs_2_instClus(tfs)

    for i in range(0, len(err_mat)):
        # Matlab code:
        #dec_row = dec_mat(cur_nb_ix, :);
        #dec_row(:, end ) = dec_row(:, end ) - adv_whole;
        #dec_row(:, clus_id) = dec_row(:, clus_id) - adv_self;
        row = np.copy( err_mat[i, :] )
        #print row
        row[-1] = row[-1] - adv_whole

        inst_i_clus = inst_clus[i]
        if len(inst_i_clus) > 0:
            row[inst_i_clus] = row[inst_i_clus] - adv_self
        #print row

        ix_good_clus = list( np.where( row < row[-1] ) [0] )
        #print ix_good_clus
        if len(ix_good_clus) > 0:
            dec_mat[i, ix_good_clus] = True
            dec_ixs.append(ix_good_clus)
        else:
            dec_ixs.append([])

    return [dec_mat, dec_ixs]

#[dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs)
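# Worked example of the advantage arithmetic in f_err_2_decMat (not from the
# original script). With adv_whole=0.4 and adv_self=0.5, a cluster is kept
# only when its error still beats the whole-data error after the handicaps.
'''
err_row = np.array([0.30, 0.80, 0.45])   # two clusters + the whole-data column
# whole-data column: 0.45 - 0.4 = 0.05
# cluster 0 (the instance belongs to it): 0.30 - 0.5 = -0.20 < 0.05 -> kept
# cluster 1 (remote): 0.80 > 0.05 -> dropped
'''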
def f_ICE_tr_te_all_clus(X_tr, X_te, clus, models, doNorm=True):
    '''
    Use the training data to predict the testing data.
    Use whole training data to predict
    Use each cluster of training data to predict the testing data.
    '''
    y_pred_all = np.zeros(( len(X_te), len(clus) + 1 ))

    # the first col is the prediction using the whole data
    model_whole = models[-1]
    y_pred_all[:, 0] = f_te(X_te, model_whole)
    #y_pred_all[:, 0] = f_tr_te(X_tr, y_tr, X_te, model)
    #print 'whole model good '

    # start from the second col, the result is by each cluster
    for i in range(0, len(clus)):
        #Xi = X_tr[clus[i].flatten(), :]
        #yi = y_tr[clus[i].flatten() ]
        model_i = models[i]
        #model_a_clust = copy.deepcopy(model)
        try:
            y_pred_te = f_te(X_te, model_i)
        except Exception:
            if model_i == 0:
                y_pred_te = np.zeros(len(X_te))
            elif model_i == 1:
                y_pred_te = np.ones(len(X_te))
            else:
                y_pred_te = np.zeros(len(X_te))
                y_pred_te.fill(np.nan)
        #except NotFittedError as e:
        #    print(repr(e))
        #    y_pred_te = np.zeros(len(X_te))
        #    y_pred_te.fill(np.nan)
        #print 'model '+str(i)+' good '
        #y_pred_te = f_tr_te(Xi, yi, X_te, model)

        if doNorm == True:
            templete = y_pred_all[:, 0]
            target = y_pred_te
            y_pred = f_quantileNorm(templete, target)
        else:
            y_pred = y_pred_te

        y_pred_all[:, i+1] = y_pred

    return y_pred_all

# test
#y_pred_all = f_ICE_tr_te_all_clus(X, X, clus, models)

def f_ICE_fit(X_tr, y_tr, n_clus, model, w=0.4, s=0.5):
    '''
    '''
    # rwr based fuzzy clustering
    clus = f_fuzzy_rwr_clusters(X_tr, n_clus)
    #print clus[0]
    tfs = f_clus_to_tfs(clus, len(X_tr))

    # train models and calculate the error-decision tables
    y_tr = y_tr.astype(float)
    #model = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \
    #model = BaggingClassifier(base_estimator = svm.LinearSVC(), \
    #model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
    #                          random_state=None, n_estimators = 100 )
    [err_mat, models] = f_err_mat(X_tr, y_tr, clus, model)

    [dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs, w, s)
    print ('  Done calculating decision table')

    return [clus, models, dec_ixs]

#def_deal_miss_v_1(d):
'''
deal with missing values by replacing them by mean.
'''

def f_ICE_fit_2(X_tr, y_tr, n_clus, model, w=0.4, s=0.5):
    '''
    This version use the err mat to re-clustering
    '''
    # rwr based fuzzy clustering
    clus = f_fuzzy_rwr_clusters(X_tr, n_clus)
    #print clus[0]
    tfs = f_clus_to_tfs(clus, len(X_tr))

    y_tr = y_tr.astype(float)
    [err_mat, models] = f_err_mat(X_tr, y_tr, clus, model)

    # ******************** re-clustering ********************
    n_iter = 2
    for i in range(0, n_iter):
        clus = f_fuzzy_rwr_clusters(err_mat, n_clus)
        tfs = f_clus_to_tfs(clus, len(X_tr))
        [err_mat, models] = f_err_mat(X_tr, y_tr, clus, model)
    # *******************************************************

    [dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs, w, s)
    print ('  Done calculating decision table')

    return [clus, models, dec_ixs]
''' ixs = [] # 1.", "clustering') return clus_ordered #test #clus = f_fuzzy_rwr_clusters(X, 100) # pass", "0 #% ... #% 1 1 1 1 1 #%", "dec_mat. This solution will avoid the case that if one", "the whole instances using each cluster data, while self #", "N = 5 alpha = 1 beta = 1 k_fold", "0.034, 0.034;... % 0.207, 0.425, 0.167, 0.132, 0.117, 0.071, 0.071;...", "to. ''' inst_clus = [] for i in range(0, len(tfs)):", "#import sklearn from scipy.sparse import spdiags from scipy.spatial import distance", "0.035, 0.014, 0.154, 0.009, 0.203, 0.425] % % Each column", "np.sum(tmp, axis=1) b[ixs] = np.inf # % find the farthest", "1 1 1 0 0 tfs = np.zeros((n_inst, len(clus)), dtype=bool)", "to make the ix fit the col id in y_pred_all", "clus_ids_to_use a = list( np.array(a) + 1 ) clus_ids_to_use =", "good for fuzzy clusters. % k is the number of", "= sort(sim-diag(diag(sim) ) ); I = np.argsort(sim, kind='mergesort') + 1", "centroids for soft % clustering. If we just random pickup", "clus to a true-false matrix. ''' if each_clus_sz == None:", "BaggingClassifier(base_estimator = svm.LinearSVC(), \\ # bagging = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(),", "corresponds to f_weka_bg_svm_tr_te() in Matlab version ''' #bagging = BaggingClassifier(base_estimator", "roc_auc_score(y.flatten(), y_pred) # the easy dataset mesothelioma get 1.0 CV", "clus_ids_to_use #print nei_labels y_pred_ICE[j] = np.nanmean(y_pred_all[j, clus_ids_to_use]) print ('Done predicting", "RWR(A, nSteps, lazi, None) ''' # test #dst = distance.euclidean(A)", "(I2 >length(sim)-k); aRankNet = I2 > (len(sim) - k) #", "W = A.dot( spdiags(np.power(sum(np.float64(A)) , -1)[np.newaxis], \\ 0, n, n).toarray()", "0.154, 0.034, 0.034;... % 0.207, 0.425, 0.167, 0.132, 0.117, 0.071,", "0 0 0 0 % 0.5000 0 0.2500 0.3333 0", "#sim = np.array([[0, 0.5566, 0.6448, 0.3289], \\ # [0.5566, 0,", "whole data. y_pred_multi[clus[j].flatten(), j] = y_pred print(' c-'+str(j)+' done predicting", "y_tr, n_clus, model, w=0.4, s=0.5): ''' ''' # rwr based", "2. iteratively find the rest nodes for i in range(1,", "instances, while self % prediction using 10-fold CV. corresponds to", "True, True, False]]) def f_find_centers_rwMat(rw_mat, k): ''' % on the", "model_a_clust.fit(Xj, yj) y_pred = model_a_clust.predict_proba(X_other) y_pred = y_pred[:, 1].flatten() #", "% the random walk algorithm. % A is the input", "adv_self=0.5): ''' Convert the err table to decision table. '''", "X[:, 0] #target = X[:, 1] #new = f_quantileNorm(templete, target)", "= [0,2,2,0,0,0,0;... % 2,0,1,1,0,0,0;... % 2,1,0,0,1,0,0;... % 0,1,0,0,0,1,1;... % 0,0,1,0,0,0,0;...", "whole training data to predict Use each cluster of training", "boolean table representation of clustering result to for each instance,", "--------------------------- SELF ------------------------- def f_err_mat(X, y, clus, model): ''' Calculate", "belongs to. #% #% clus e.g: #% 1x5 cell #%", "def f_te(X_te, model): y_pred = model.predict_proba(X_te) y_pred = y_pred[:, 1].flatten()", "print(' done calculating the Euclidean distance matrix') # --------------------------------------------------------------- aRank_k_neighbors", "= BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \\ random_state=None, n_estimators = 100 )", "np.zeros(yj.size) # y_pred.fill(np.unique(yj)[0]) #else: try: y_pred = f_k_fo(Xj, yj, model,", "# # --------------------------- SELF ------------------------- def f_err_mat(X, y, clus, model):", "the testing data. 
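# --- editor's note (illustrative sketch, not part of the original script) ---
# The W built inside RWR above is A normalized so every column sums to 1
# (a column-stochastic transition matrix). A minimal sanity check, assuming
# the toy adjacency matrix A from the RWR docstring:
#
#   A = np.array([[0,2,2,0,0,0,0], [2,0,1,1,0,0,0], [2,1,0,0,1,0,0],
#                 [0,1,0,0,0,1,1], [0,0,1,0,0,0,0], [0,0,0,1,0,0,1],
#                 [0,0,0,1,0,1,0]], dtype=float)
#   W = A / A.sum(axis=0)              # same normalization, without spdiags
#   assert np.allclose(W.sum(axis=0), 1.0)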
# test
#dst = distance.euclidean(A)
# correct, the same as in Matlab


def f_sim_2_aRankNet(sim, k=3):
    '''
    % Convert the similarity matrix to a network graph where each node
    % has k edges to other nodes (aRank).
    '''
    # delete the diagonal values.
    # sim = sim-diag(diag(sim) );
    np.fill_diagonal(sim, 0)

    # [~, I] = sort(sim-diag(diag(sim) ) );
    I = np.argsort(sim, kind='mergesort') + 1

    # [~, I2] = sort(I);
    I2 = (np.argsort(I, kind='mergesort').T + 1).T

    # for every column, just keep the top k edges.
    #aRankNet = (I2 >length(sim)-k);
    aRankNet = I2 > (len(sim) - k)

    # make it a symmetric matrix
    # aRankNet = max(aRankNet, aRankNet');
    aRankNet = np.logical_or(aRankNet, aRankNet.T)

    # remove the diagonal 1s.
    # aRankNet = aRankNet-diag(diag(aRankNet) );
    np.fill_diagonal(aRankNet, False)

    return aRankNet

# test
#sim = np.array([[0, 0.5566, 0.6448, 0.3289], \
#                [0.5566, 0, -0.0842, -0.0170], \
#                [0.6448, -0.0842, 0, 0.8405], \
#                [0.3289, -0.0170, 0.8405, 0]])
#
#f_sim_2_aRankNet(sim,1)
#f_sim_2_aRankNet(sim,2)
#f_sim_2_aRankNet(sim,3)
#
#array([[False,  True,  True, False],
#       [ True, False, False, False],
#       [ True, False, False,  True],
#       [False, False,  True, False]])
#
#array([[False,  True,  True,  True],
#       [ True, False, False, False],
#       [ True, False, False,  True],
#       [ True, False,  True, False]])
#
#array([[False,  True,  True,  True],
#       [ True, False, False,  True],
#       [ True, False, False,  True],
#       [ True,  True,  True, False]])


def f_find_centers_rwMat(rw_mat, k):
    '''
    % on the rw_mat matrix, find some nodes as the centroids for soft
    % clustering. If we just random pickup some nodes as centroids, that is
    % not good for fuzzy clusters.
    % k is the number of centroids.
    '''
    ixs = []
    # 1. find the most connected center node as the first centroid.
    a = np.sum(rw_mat, axis=1) # axis=1 for rows; 0 for col
    # % most connected node.
    ix = np.argmax(a)
    ixs.append(ix)

    # % 2. iteratively find the rest nodes
    for i in range(1, k):
        tmp = rw_mat[:, ixs]
        b = np.sum(tmp, axis=1)
        b[ixs] = np.inf
        # % find the farthest node
        ix = np.argmin(b)
        ixs.append(ix)
    return ixs

# test
#tmp = f_find_centers_rwMat(rw_mat, 10)


def getCutoff(rw_mat, avgNeighborsSize):
    tmp = rw_mat.flatten('F')
    a = np.flip(np.sort(tmp), 0)
    len1 = len(rw_mat)
    #cutoffs = []
    all_neibs = int( avgNeighborsSize * len1 )
    print( all_neibs)
    ct = a[all_neibs]
    return ct
#test
#>>> a = np.array([[1,2], [3,4]])
#>>> a.flatten()
#array([1, 2, 3, 4])
#>>> a.flatten('F')
#array([1, 3, 2, 4])
'''
a = np.array( range(0,100) )
b = np.matlib.repmat(a, 100, 1)
ct = getCutoff(b, 70)
'''


def f_len_of_each_ele(c1):
    #% Assume c1 is a 1-dimension cell array, and each element is a 1d double
    #% array. This function counts the length of each double array.
    lens = np.zeros(len(c1))
    for i in range(0, len(c1)):
        lens[i] = len(c1[i])
    return lens


def f_eu_dist(X):
    '''
    calculate the euclidean distance between instances
    '''
    sim = np.zeros(( len(X), len(X) ))
    for i in range(0, len(X)):
        for j in range(i+1, len(X)):
            tmp = distance.euclidean(X[i], X[j])
            sim[i][j] = tmp
            sim[j][i] = tmp
    sim = -sim
    np.fill_diagonal(sim, 0)
    return sim
#test
#sim = f_eu_dist(X)


def f_eu_dist2(X1, X2):
    '''
    calculate the euclidean distance between instances from two datasets
    '''
    sim = np.zeros(( len(X1), len(X2) ))
    for i in range(0, len(X1) ):
        for j in range(0, len(X2) ):
            tmp = distance.euclidean(X1[i], X2[j])
            sim[i][j] = tmp
    sim = -sim
    return sim
#test
#sim = f_eu_dist2(X_tr, X_te)
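# --- editor's note (illustrative sketch, not part of the original script) ---
# f_eu_dist and f_eu_dist2 mirror the Matlab `sim = -squareform(pdist(X))`
# idea with explicit loops. A vectorized equivalent, assuming scipy is
# available, would be:
#
#   from scipy.spatial.distance import pdist, squareform, cdist
#   sim  = -squareform(pdist(X))      # same result as f_eu_dist(X)
#   sim2 = -cdist(X_tr, X_te)         # same result as f_eu_dist2(X_tr, X_te)
#
# Both return the *negated* distances, so larger values mean more similar.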
def f_fuzzy_rwr_clusters(X, k=100, each_clus_sz=None):
    # X: data
    # k: number of clusters
    '''
    The return variable clus stores the instance indices for each cluster.
    However, this data structure is not easy to find, for a instance, which are
    the clusters it belongs to, thus we also need to convert clus to a
    true-false matrix.
    '''
    if each_clus_sz is None:
        # on average, how many clusters does one inst belongs to.
        #overlap_factor = 2;
        # the estimated size of each cluster. default is a third of the
        # number of instances.
        each_clus_sz = len(X)/3

    print('RWR-based fuzzy clustering starts...')
    print(' NO. clusters = '+str(k)+'; avg. cluster size = '+str(each_clus_sz) )

    # sim = squareform(pdist(X));
    # sim = -sim;
    sim = np.zeros((len(X), len(X) ) )
    for i in range(0, len(X)):
        for j in range(i+1, len(X)):
            tmp = distance.euclidean(X[i], X[j])
            sim[i][j] = tmp
            sim[j][i] = tmp
    sim = -sim
    print(' done calculating the Euclidean distance matrix')

    # ---------------------------------------------------------------
    aRank_k_neighbors = np.ceil(np.log10(len(sim)) )
    ori_graph = f_sim_2_aRankNet(sim, aRank_k_neighbors)
    print(' done calculating the A-rank KNN graph')

    # % -------- RWR --------
    nSteps = 1000
    lazi = 0.3
    rw = RWR(ori_graph, nSteps, lazi)

    # remove the prob of returning start node
    np.fill_diagonal(rw, 0)
    rw_mat = rw
    print(' done RWR')

    # ---------------------------------------------------------------
    ixs_centers = f_find_centers_rwMat(rw_mat, k)

    ct = getCutoff(rw_mat, each_clus_sz)
    rw_net = rw_mat > ct
    # % set the diagonal to 1
    np.fill_diagonal(rw_net, True)

    clus = []
    for i in range(0, k):
        tmp = np.argwhere(rw_net[:, ixs_centers[i] ] ).flatten()
        clus.append(tmp)

    # ---------------------------------------------------------------
    # % sort the clusters
    lens = f_len_of_each_ele(clus)
    ix = np.argsort(lens)[::-1]
    clus_ordered = [clus[i] for i in ix]

    print(' center inst. index of each cluster: ')
    ixs_centers = np.array(ixs_centers)
    print(ixs_centers[ix])

    print(' size of each cluster: ')
    print(lens[ix])

    print(' done RWR clustering')
    return clus_ordered
#test
#clus = f_fuzzy_rwr_clusters(X, 100)
# pass


def f_clus_to_tfs(clus, n_inst):
    #% convert the cluster information from cell array to mat. But for each
    #% instance, the rank of clusters information will be lost - you won't know
    #% what is the top 1/2/3 cluster it belongs to.
    #%
    #% clus e.g:
    #% 1x5 cell
    #% 1x195 double  1x193 double  1x169 double  1x161 double  1x62 double
    #%
    #% tfs e.g:
    #% 295x5 double
    #% 1  0  0  0  0
    #% 1  1  1  1  0
    #% 1  1  1  0  0
    #% 1  1  0  0  0
    #% ...
    #% 1  1  1  1  1
    #% 1  0  0  0  0
    tfs = np.zeros((n_inst, len(clus)), dtype=bool)
    for i in range(0, len(clus)):
        tfs[clus[i], i] = True
    return tfs
# test
#tfs = f_clus_to_tfs(clus, len(X))
# pass


def f_tfs_2_instClus(tfs):
    '''
    convert the boolean table representation of clustering result to, for each
    instance, what clusters it belongs to.
    '''
    inst_clus = []
    for i in range(0, len(tfs)):
        row = list( np.where(tfs[i, :] ) [0] )
        inst_clus.append(row)
    return inst_clus
# test
#inst_clus = f_tfs_2_instClus(tfs)


#def f_bg_svm_tr_te(X_tr, y_tr, X_te, y_te):
#    #bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \
#    bagging = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
#                                random_state=None, n_estimators = 100 )
#    bagging.fit(X_tr, y_tr)
#
#    y_pred = bagging.predict_proba(X_te)
#    y_pred = y_pred[:, 1].flatten()
#
#    auc = roc_auc_score(y_te.flatten(), y_pred)
#    return [y_pred, auc]

# test
'''
X_tr = X
y_tr = y
X_te = X
y_te = y
[y_pred, auc] = f_bg_svm_tr_te(X_tr, y_tr, X_te, y_te)
'''

#def f_bg_tr_te(X_tr, y_tr, X_te, y_te, BaseBagging):
#    '''
#    corresponds to f_weka_bg_svm_tr_te() in Matlab version
#    '''
#    #bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \
#    bagging = BaggingClassifier(BaseBagging, \
#                                random_state=None, n_estimators = 100 )
#    bagging.fit(X_tr, y_tr)
#
#    y_pred = bagging.predict_proba(X_te)
#    y_pred = y_pred[:, 1].flatten()
#
#    auc = roc_auc_score(y_te.flatten(), y_pred)
#    return [y_pred, auc]


def f_quantileNorm(templete, target):
    '''
    Templete is the standard, change the target to the values in the templete.
    Target may have a very different distribution than the templete.
    templete and target should be 1d n by 1 array.
    f_my_quantileNorm()
    '''
    ix_target = np.argsort(target, kind='mergesort')
    ix_templete = np.argsort(templete, kind='mergesort')
    target[ix_target] = templete[ix_templete]
    new = target
    return new
# test
#templete = X[:, 0]
#target = X[:, 1]
#new = f_quantileNorm(templete, target)
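# --- editor's note (illustrative sketch, not part of the original script) ---
# Tiny worked example of f_quantileNorm: the target keeps its *ranking* but
# takes on the templete's *values*.
#
#   templete = np.array([1.0, 3.0, 5.0])
#   target   = np.array([10.0, 2.0, 7.0])   # ranks: 3rd, 1st, 2nd
#   f_quantileNorm(templete, target)        # -> array([5., 1., 3.])
#
# Note that the function modifies `target` in place and also returns it.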
def f_tr(X_tr, y_tr, model):
    model_inner = copy.deepcopy(model)
    model_inner.fit(X_tr, y_tr)
    return model_inner


def f_te(X_te, model):
    y_pred = model.predict_proba(X_te)
    y_pred = y_pred[:, 1].flatten()
    return y_pred


def f_tr_te(X_tr, y_tr, X_te, model):
    '''
    corresponds to f_weka_bg_svm_tr_te() in Matlab version
    '''
    #bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \
    #bagging = BaggingClassifier(BaseBagging, \
    #                            random_state=None, n_estimators = 100 )
    model_inner = copy.deepcopy(model)
    model_inner.fit(X_tr, y_tr)
    y_pred = model_inner.predict_proba(X_te)
    y_pred = y_pred[:, 1].flatten()
    #auc = roc_auc_score(y_te.flatten(), y_pred)
    return y_pred


def f_k_fo(X, y, model, k_fold=10):
    '''
    corresponds to f_weka_bg_svm_arff_k_fo_3_parfor() in Matlab version
    '''
    y = y.flatten()
    y_pred = np.zeros(y.size)

    skf = StratifiedKFold(n_splits=k_fold, random_state=None, shuffle=True)
    skf.get_n_splits(X, y)
    for train_index, test_index in skf.split(X, y):
        # print("TRAIN:", train_index, "TEST:", test_index)
        X_tr, X_te = X[train_index], X[test_index]
        #y_tr, y_te = y[train_index], y[test_index]
        y_tr = y[train_index]

        if np.unique(y_tr).size == 1:
            # the training fold has only one class: predict that constant label
            y_pred_fo = np.zeros( len(test_index) )
            #print len(X_te)
            #print len(test_index)
            #print y_pred_fo
            y_pred_fo.fill(np.unique(y_tr)[0] )
            #print y_pred_fo
        else:
            y_pred_fo = f_tr_te(X_tr, y_tr, X_te, model)
        y_pred[test_index] = y_pred_fo

    #auc = roc_auc_score(y.flatten(), y_pred)
    return y_pred

# test
#pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/'
##X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast cancer
##y = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['y']
#X = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['X'] # 11:mesothelioma
#y = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['y']
#
#model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
#                          random_state=None, n_estimators = 100 )
#y_pred = f_k_fo(X, y, model, k_fold=10)
#
#print roc_auc_score(y.flatten(), y_pred)
# the easy dataset mesothelioma get 1.0 CV result.
# breast cancer get 0.599
# all results are correct.
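# --- editor's note (illustrative sketch, not part of the original script) ---
# f_k_fo produces out-of-fold probability predictions. Assuming a scikit-learn
# estimator with predict_proba, a largely equivalent one-liner would be:
#
#   from sklearn.model_selection import cross_val_predict
#   y_pred = cross_val_predict(model, X, y.flatten(),
#                              cv=StratifiedKFold(n_splits=10, shuffle=True),
#                              method='predict_proba')[:, 1]
#
# (f_k_fo additionally handles the degenerate single-class training fold by
# filling in the constant label, which cross_val_predict does not.)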
#def f_bg_k_fo_3(X, y, k_fold=10):
#    '''
#    corresponds to f_weka_bgSvm_arff_k_fo_3_parfor() in Matlab version
#    corresponds to f_k_fo()
#    '''
#    y_pred = np.zeros((y.size, 1))
#
#    skf = StratifiedKFold(n_splits=k_fold)
#    skf.get_n_splits(X, y)
#
#    for train_index, test_index in skf.split(X, y):
#        #print("TRAIN:", train_index, "TEST:", test_index)
#        X_tr, X_te = X[train_index], X[test_index]
#        y_tr, y_te = y[train_index], y[test_index]


def f_use_each_clus_forWhole(X, y, clus, y_pred_whole, model, fo_inner):
    '''
    % using each cluster data to predict the whole instances, while self
    % prediction using 10-fold CV.
    % corresponds to f_use_each_clus_forWhole_bg_svm() in Matlab version
    '''
    n_clusters = len(clus)
    y_pred_multi = np.zeros((y.size, n_clusters) )
    models = []

    for j in range(0, n_clusters): # for each cluster
        Xj = X[clus[j].flatten(), :]
        yj = y[clus[j].flatten() ]
        model_a_clust = copy.deepcopy(model)
        print(' Cluster '+str(j)+' started...')

        if len(yj) > 15 and np.unique(yj).size != 1:
            # ------------------ for self ------------------
            #if np.unique(yj).size == 1:
            #    y_pred = np.zeros(yj.size)
            #    y_pred.fill(np.unique(yj)[0])
            #else:
            try:
                y_pred = f_k_fo(Xj, yj, model, fo_inner)

                # quantileNorm
                templete = y_pred_whole[clus[j].flatten()]
                target = y_pred
                y_pred = f_quantileNorm(templete, target)

                # copy the normed prediction to the whole data.
                y_pred_multi[clus[j].flatten(), j] = y_pred
                print(' c-'+str(j)+' done predicting local instances')

                # ------------------ for other -----------------
                ix_other = set(range(0, y.size)) - set(clus[j].flatten())
                ix_other = list(ix_other)
                #print ix_other
                X_other = X[ix_other , :]
                #y_other = y[ix_other ]

                # predict
                #y_pred = f_tr_te(Xj, yj, X_other, model)
                #if np.unique(yj).size != 1:
                model_a_clust.fit(Xj, yj)
                y_pred = model_a_clust.predict_proba(X_other)
                y_pred = y_pred[:, 1].flatten()

                # quantileNorm
                templete = y_pred_whole[ix_other]
                target = y_pred
                y_pred = f_quantileNorm(templete, target)
                #else:
                #    y_pred = np.zeros(X_other.size)
                #    y_pred.fill(np.unique(yj)[0])

                # copy to the whole array
                y_pred_multi[ix_other, j] = y_pred
                print(' c-'+str(j)+' done predicting remote instances')
            except ValueError as e:
                print(e)
                print(' skip this cluster')
                y_pred = np.zeros(y.size)
                y_pred.fill(np.nan)
                y_pred_multi[:, j] = y_pred
        else:
            if len(yj) <= 15:
                print (' '+str(len(yj))+' insts in cluster, <= 15, skip...')
                y_pred = np.zeros(y.size)
                y_pred.fill(np.nan)
                y_pred_multi[:, j] = y_pred
            if np.unique(yj).size == 1:
                print (' warning, #unique class label(s) == 1')
                y_pred = np.zeros(y.size)
                y_pred.fill(np.unique(yj)[0])
                y_pred_multi[:, j] = y_pred
                model_a_clust = np.unique(yj)[0]

        models.append(model_a_clust)
    return [y_pred_multi, models]
# test
#[y_pred_multi, models] = f_use_each_clus_forWhole(X, y, clus, y_pred_whole, model)


#def f_dec_tab_4_bg_svm(X, y, clus):
#    '''
#    Calculate the decision table:
#    % This version 3 adds the support for fuzzy clustering - one instance may
#    % belongs to more than one cluster.
#    % This updated version also outputs the predicted values of y.
#    % support the quantile normalization - it already
#    % take place in y_pred_self and y_pred_other, thus do not
#    % need normalization when predict y_pred_ICE.
#    % ixsp is another cluster form.
#
#    % change the error table to the instance-cluster
#    % dec_mat. This solution will avoid the case that if one cluster decision
#    % is wrong, the entire cluster prediction is wrong, which is the reason
#    % of instability. However, we cannot use a systematic evaluation criteria
#    % such as AUC, I will try using the predicted prob at first.
#
#    corresponds to f_dec_tab_4_bg_svm() in Matlab version
#    '''
#    #n_clusters = len(clus)
#    ## dec_mat stores the prediction error.
#    #pred_mat=np.zeros((y.size, n_clusters+1)) #the extra col is for whole pred
#
#    ## k_fold of inner cross-validation
#    #fo_inner = 10
#    # --------------------------- WHOLE -------------------------
#    # --------------------------- SELF -------------------------


def f_err_mat(X, y, clus, model):
    '''
    Calculate the decision table.
    corresponds to f_dec_tab_4_bg_svm() in Matlab version
    '''
    n_clusters = len(clus)
    # dec_mat stores the prediction error.
    pred_prob_mat = np.zeros((y.size, n_clusters+1)) #the extra col is for whole pred
    # col 0 to col n_clusters-1 store the predictions by each cluster
    # the last col stores the pred by whole data
    #models = []

    # k_fold of inner cross-validation
    fo_inner = 5

    # --------------------------- WHOLE -------------------------
    # Predict each cluster using the whole data.
    model_whole = copy.deepcopy(model)
    y_pred_whole = f_k_fo(X, y, model_whole, fo_inner)
    model_whole.fit(X, y) # fit a model using all data rather than only a fold
    pred_prob_mat[:, n_clusters] = y_pred_whole
    print (' Done evaluation using whole instances')
    print (' Start to evaluate using each cluster...')

    # --------------------------- SELF -------------------------
    # Predict the whole instances using each cluster data, while self
    # prediction using 10-fold CV.
    [y_pred_multi, models] = f_use_each_clus_forWhole(X, y, clus, \
                                    y_pred_whole, model, fo_inner)
    print (' Done evaluation using each cluster')

    models.append(model_whole)

    pred_prob_mat[:, 0:n_clusters] = y_pred_multi

    # make a tmp array that stores y
    tmp = np.matlib.repmat(y.reshape((y.size, 1)), 1, n_clusters+1)
    err_mat = abs(pred_prob_mat - tmp )

    print (' Done calculating error table and fitting ICE models')
    return [err_mat, models]

"""
#mat = scipy.io.loadmat('/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/'+\
#                       '3_scripts/2017_4_4/data/names.mat')['names']
#mat = io.loadmat('/Users/zg/Desktop/a.mat')['names']
#test
pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/'
X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast cancer
y = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['y']
#X = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['X'] # 11:mesothelioma
#y = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['y']
n_clus = 3
clus = f_fuzzy_rwr_clusters(X, n_clus)
tfs = f_clus_to_tfs(clus, len(X))
y = y.astype(float)
#model = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \
#model = BaggingClassifier(base_estimator = svm.LinearSVR(), \
#model = BaggingClassifier(base_estimator = svm.LinearSVC(), \
model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
                          random_state=None, n_estimators = 100 )
[err_mat, models] = f_err_mat(X, y, clus, model)
"""


def f_err_2_decMat(err_mat, tfs, adv_whole=0.4, adv_self=0.5):
    '''
    Convert the err table to decision table.
    '''
    dec_mat = np.zeros(( len(err_mat), err_mat.shape[1]-1 ), dtype=bool)

    # dec_ixs: for each instance, which clusters should be used.
    dec_ixs = []
    inst_clus = f_tfs_2_instClus(tfs)

    for i in range(0, len(err_mat)):
        # Matlab code:
        #dec_row = dec_mat(cur_nb_ix, :);
        #dec_row(:, end ) = dec_row(:, end ) - adv_whole;
        #dec_row(:, clus_id) = dec_row(:, clus_id) - adv_self;
        row = np.copy( err_mat[i, :] )
        #print row
        row[-1] = row[-1] - adv_whole

        inst_i_clus = inst_clus[i]
        if len(inst_i_clus) > 0:
            row[inst_i_clus] = row[inst_i_clus] - adv_self
        #print row

        ix_good_clus = list( np.where( row < row[-1] ) [0] )
        #print ix_good_clus
        if len(ix_good_clus) > 0:
            dec_mat[i, ix_good_clus] = True
            dec_ixs.append(ix_good_clus)
        else:
            dec_ixs.append([])

    return [dec_mat, dec_ixs]
# test
#[dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs)
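# --- editor's note (illustrative sketch, not part of the original script) ---
# Toy walk-through of f_err_2_decMat for one instance, with 2 clusters plus
# the whole-data column and the default handicaps (adv_whole=0.4, adv_self=0.5):
#
#   row = [0.9, 0.3, 0.5]      # errors: cluster0, cluster1, whole
#   row[-1] -= 0.4             # whole gets its advantage      -> 0.1
#   row[[1]] -= 0.5            # instance belongs to cluster1  -> -0.2
#   # keep clusters with err < adjusted whole err: only cluster1 (-0.2 < 0.1)
#
# So dec_ixs for this instance would be [1]: cluster 1 beats the whole-data
# model by a large enough margin to be trusted at prediction time.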
def f_ICE_tr_te_all_clus(X_tr, X_te, clus, models, doNorm=True):
    '''
    Use the training data to predict the testing data.
    Use whole training data to predict.
    Use each cluster of training data to predict the testing data.
    '''
    y_pred_all = np.zeros(( len(X_te), len(clus) + 1 ))
    # the first col is the prediction using the whole data
    model_whole = models[-1]
    y_pred_all[:, 0] = f_te(X_te, model_whole)
    #y_pred_all[:, 0] = f_tr_te(X_tr, y_tr, X_te, model)
    #print 'whole model good '

    # start from the second col, the result is by each cluster
    for i in range(0, len(clus)):
        #Xi = X_tr[clus[i].flatten(), :]
        #yi = y_tr[clus[i].flatten() ]
        model_i = models[i]
        try:
            y_pred_te = f_te(X_te, model_i)
        except :
            # single-class clusters store a constant label instead of a model
            if model_i == 0:
                y_pred_te = np.zeros(len(X_te))
            elif model_i == 1:
                y_pred_te = np.ones(len(X_te))
            else:
                y_pred_te = np.zeros(len(X_te))
                y_pred_te.fill(np.nan)
        #except NotFittedError as e:
        #    print(repr(e))
        #    y_pred_te = np.zeros(len(X_te))
        #    y_pred_te.fill(np.nan)
        #print 'model '+str(i)+' good '
        #y_pred_te = f_tr_te(Xi, yi, X_te, model)

        if doNorm == True:
            templete = y_pred_all[:, 0]
            target = y_pred_te
            y_pred = f_quantileNorm(templete, target)
        else:
            y_pred = y_pred_te

        y_pred_all[:, i+1] = y_pred

    return y_pred_all
# test
#y_pred_all = f_ICE_tr_te_all_clus(X, X, clus, models)


def f_ICE_fit(X_tr, y_tr, n_clus, model, w=0.4, s=0.5):
    '''
    Cluster the training data, train the per-cluster and whole-data models,
    and build the decision table.
    '''
    # rwr based fuzzy clustering
    clus = f_fuzzy_rwr_clusters(X_tr, n_clus)
    #print clus[0]
    tfs = f_clus_to_tfs(clus, len(X_tr))

    # train models and calculate the error-decision tables
    y_tr = y_tr.astype(float)
    #model = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \
    #model = BaggingClassifier(base_estimator = svm.LinearSVC(), \
    #                          random_state=None, n_estimators = 100 )
    [err_mat, models] = f_err_mat(X_tr, y_tr, clus, model)

    # *******************************************************
    [dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs, w, s)
    print (' Done calculating decision table')

    return [clus, models, dec_ixs]


#def_deal_miss_v_1(d):
'''
deal with missing values by replacing them by mean.
'''

def f_ICE_fit_2(X_tr, y_tr, n_clus, model, w=0.4, s=0.5):
    '''
    Same as f_ICE_fit, but re-clusters the instances on the error table
    before building the decision table.
    '''
    # rwr based fuzzy clustering
    clus = f_fuzzy_rwr_clusters(X_tr, n_clus)
    tfs = f_clus_to_tfs(clus, len(X_tr))

    y_tr = y_tr.astype(float)
    #model = BaggingClassifier(base_estimator = svm.LinearSVC(), \
    #                          random_state=None, n_estimators = 100 )
    [err_mat, models] = f_err_mat(X_tr, y_tr, clus, model)

    # ******************** re-clustering ********************
    n_iter = 2
    for i in range(0, n_iter):
        clus = f_fuzzy_rwr_clusters(err_mat, n_clus)
        tfs = f_clus_to_tfs(clus, len(X_tr))
        [err_mat, models] = f_err_mat(X_tr, y_tr, clus, model)

    [dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs, w, s)
    print (' Done calculating decision table')

    return [clus, models, dec_ixs]


def f_ICE_pred(X_tr, y_tr, X_te, clus, dec_ixs, models,N=5,alpha=1,beta=1):
    '''
    clus and inst_clus contains the same information, that clus is the
    instances ids for each cluster, while inst_clus stores that for each
    instance, which cluster(s) it belongs to.
    dec_ixs stores the good cluster(s) for each instance, which may include
    even a remote cluster. each instance in dec_ixs does not contain
    the whole set of instances.
    '''
    # the first col is the prediction using the whole data
    # start from the second col, the result is by each cluster
    y_pred_all = f_ICE_tr_te_all_clus(X_tr, X_te, clus, models)
    y_pred_ICE = np.zeros( len(X_te) )

    neighbour_mat = f_eu_dist2(X_tr, X_te)

    # ---------- for each testing instance ----------
    #n_partials = np.zeros( len(X_te) )
    #n_wholes = np.zeros( len(X_te) )
    for j in range(0, len(X_te) ): # for each testing instance
        # find the top 10 neighbors for each test instance
        neighbour_col = neighbour_mat[:, j].flatten()
        ix = np.argsort(neighbour_col )
        ix = ix[::-1]
        ix_top_neighbors = ix[0:N]
        #print 'testing inst ' + str(j)
        #print ' ix of top neighbors:'
        #print ix_top_neighbors

        # ---------- find all neighbors' picks ----------
        clus_ids_to_use = []
        nei_labels = []
        for cur_nb in range(0, N): # for each neighbour
            # find each neighbour's pick
            cur_nb_ix = ix_top_neighbors[cur_nb]
            clus_id_to_use = list( dec_ixs[cur_nb_ix] )
            clus_ids_to_use = clus_ids_to_use + clus_id_to_use
            # also find neighbor's label. maybe will be used later as a
            # reference of using whole to pred.
            nei_labels = nei_labels + list( y_tr[cur_nb_ix] )
        #print ' clus_ids_to_use:'
        #print clus_ids_to_use

        # cluster id + 1 to make the ix fit the col id in y_pred_all
        a = clus_ids_to_use
        a = list( np.array(a) + 1 )
        clus_ids_to_use = a

        # number of partial models used
        n_partial = len(clus_ids_to_use)
        # number of whole models used, based on parameters alpha, beta and N.
        n_whole = int( round( alpha*n_partial + beta*N ) )
        clus_ids_to_use = clus_ids_to_use + [0] * n_whole
        #print ' clus_ids_to_use:'
        #print clus_ids_to_use
        #print nei_labels

        y_pred_ICE[j] = np.nanmean(y_pred_all[j, clus_ids_to_use])

    print ('Done predicting testing instances.')
    return y_pred_ICE


# test
# pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/'
# pa = '/Users/zg/Dropbox/bio/ICE_2018/'
# pa = './'
pa = 'C:/Users/zg/Dropbox/bio/ICE_2018/'
n_clus = 100
w = 0.4
s = 0.5
N = 5
alpha = 1
beta = 1
k_fold = 10
aucs_ICE = []
aucs_whole = []

# f_res = pa + 'data/res_ICE_bg_svm_1_iter.txt'
#f_res = pa + 'data/res_ICE_bg_svm_py.txt'
f_res = pa + 'data/res_ICE_SVM_py.txt'
f = open(f_res, 'w')

#for j in range(1, 50):
for j in range(1, 49):
    try:
        X = scipy.io.loadmat(pa+'data/data_all/'+str(j)+'/data.mat')['X']
        y = scipy.io.loadmat(pa+'data/data_all/'+str(j)+'/data.mat')['y']
        #X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast cancer
        #y = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['y']
        #X = scipy.io.loadmat(pa+'/data/data_all_pickle/37/data.mat')['X'] # 37:congress
        #y = scipy.io.loadmat(pa+'/data/data_all_pickle/37/data.mat')['y']
        #imgplot = plt.imshow(ori_graph, interpolation='nearest', aspect='auto')
        #plt.show()
        #sim = np.corrcoef(X)
        #np.fill_diagonal(sim, 0)
        #n_clus = 100

        #model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
        #                          random_state=None, n_estimators = 100 )
        model = svm.SVC(kernel='linear', probability = True)

        skf = StratifiedKFold(n_splits=k_fold)
        skf.get_n_splits(X, y)
        y_preds_ICE = np.zeros( y.size )
        y_preds_whole = np.zeros( y.size )
        fold_i = 1
        for train_index, test_index in skf.split(X, y):
            #print("TRAIN: ", train_index, " TEST: ", test_index)
            X_tr, X_te = X[train_index], X[test_index]
            y_tr, y_te = y[train_index], y[test_index]

            [clus, models, dec_ixs] = f_ICE_fit(X_tr, y_tr, n_clus, model, w, s)
            #[clus, models, dec_ixs] = f_ICE_fit_2(X_tr, y_tr, n_clus, model, w, s)

            y_pred_ICE = f_ICE_pred(X_tr, y_tr, X_te, clus, dec_ixs, models,N,alpha,beta)
            y_preds_ICE[test_index] = y_pred_ICE

            y_pred_whole = f_tr_te(X_tr, y_tr, X_te, model)
            y_preds_whole[test_index] = y_pred_whole

            print( j)
            print( 'fold ' + str(fold_i) + ' finished')
            fold_i = fold_i + 1

        auc_ICE = roc_auc_score(y.flatten(), y_preds_ICE.flatten() )
        auc_whole = roc_auc_score(y.flatten(), y_preds_whole.flatten() )
        print (auc_ICE, auc_whole)
        aucs_ICE.append(auc_ICE)
        aucs_whole.append(auc_whole)
        f.write(str(j) + '\t' + str(auc_ICE) + ' \t ' + str(auc_whole) + '\n')
    except Exception as e:
        # skip datasets that fail to load or fit, keep the batch running
        print(e)

f.close()
cluster size", "0 % 0.5000 0 0.2500 0.3333 0 0 0 %", "done calculating the Euclidean distance matrix') # --------------------------------------------------------------- aRank_k_neighbors =", "= f_find_centers_rwMat(rw_mat, k) ct = getCutoff(rw_mat, each_clus_sz) rw_net = rw_mat", "''' y = y.flatten() y_pred = np.zeros(y.size) skf = StratifiedKFold(n_splits=k_fold,", "steps to walk % laziness: the probablity to go back.", "= scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['X'] # 11:mesothelioma #y = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['y'] # #model =", "y_pred) # the easy dataset mesothelioma get 1.0 CV result.", "is wrong, which is the reason # % of instability.", "cur_nb in range(0, N): # for each neighbour # find", "= f_te(X_te, model_i) except : if model_i == 0: y_pred_te", "return y_pred_all # test #y_pred_all = f_ICE_tr_te_all_clus(X, X, clus, model)", "f_clus_to_tfs(clus, len(X_tr)) # train models and calculate the error-dicision tables", "not contain the whole set of instances. ''' # the", ") ) clus_ids_to_use = clus_ids_to_use + [0] * n_whole #print", "0.2500 0 0 1.0000 0 0 % 0 0.2500 0", "0.039, 0.431, 0.027, 0.232, 0.232;... % 0.038, 0.029, 0.081, 0.009,", "from sklearn.exceptions import NotFittedError #import FuzzyRwrBagging as frb #from joblib", "100 == 0: print(' done rwr ' + str(i-1) )", "error table and fitting ICE models') return [err_mat, models] \"\"\"", "edges to other nodes (aRank). ''' # delete the diagnal", "-------- RWR -------- nSteps = 1000 lazi = 0.3 rw", "% In the example above, spdiags(sum(A)'.^(-1), 0, n, n) will", "0 0 0 0 0 0.5000 0 % 0 0", "= f_sim_2_aRankNet(sim, aRank_k_neighbors) print(' done calculating the A-rank KNN graph')", "def f_tr(X_tr, y_tr, model): model_inner = copy.deepcopy(model) model_inner.fit(X_tr, y_tr) return", ") [0] ) #print ix_good_clus if len(ix_good_clus) > 0: dec_mat[i,", "from the cluster-cluster dec_mat to instance-cluster # % dec_mat. This", "i in range(1, k): tmp = rw_mat[:, ixs] b =", "''' n_clusters = len(clus) y_pred_multi = np.zeros((y.size, n_clusters) ) models", "cluster, while inst_clus stores that for each instance, which cluster(s)", "cluster ') # --------------------------- SELF ------------------------- # predict the whole", "[dec_mat, dec_ixs] #[dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs) def f_ICE_tr_te_all_clus(X_tr, X_te,", "#test #sim = f_eu_dist2(X_tr, X_te) def f_fuzzy_rwr_clusters(X, k=100, each_clus_sz=None): #", "#% 1 1 1 1 0 #% ... #% 1", "belongs to. 
#overlap_factor = 2; # the estimated size of", "# pass def f_tfs_2_instClus(tfs): ''' convert the boolean table representation", "each cluster data to predict the whole instances, while self", "#>>> a.flatten('F') #array([1, 3, 2, 4]) ''' a = np.array(", "ids for each cluster, while inst_clus stores that for each", "clusters # % normalization take place in y_pred_self and y_pred_other,", "to f_weka_bg_svm_arff_k_fo_3_parfor() in Matlab version ''' y = y.flatten() y_pred", "[ True, False, False, True], # [ True, True, True,", "X[:, 1] #new = f_quantileNorm(templete, target) #def f_bg_k_fo_3(X, y, k_fold=10):", "#y_pred = f_tr_te(Xj, yj, X_other, model) #if np.unique(yj).size != 1:", "clus is the instances ids for each cluster, while inst_clus", "#y_other = y[ix_other ] # predict #y_pred = f_tr_te(Xj, yj,", "Done evaluation using each cluster') models.append(model_whole) pred_prob_mat[:, 0:n_clusters] = y_pred_multi", "= nei_labels + list( y_tr[cur_nb_ix] ) #print ' clus_ids_to_use:' #print", "= tmp sim[j][i] = tmp sim = -sim print(' done", "try: y_pred = f_k_fo(Xj, yj, model, fo_inner) # quantileNorm templete", "decision table. ''' dec_mat = np.zeros(( len(err_mat), err_mat[0].size-1 ), dtype=bool)", "tfs e.g: #% 295x5 double #% 1 0 0 0", "rw_mat matrix, find some nodes as the centroids for soft", "models] = f_use_each_clus_forWhole(X, y, clus, \\ y_pred_whole, model, fo_inner) print", "= np.sum(rw_mat, axis=1) # axis=1 for rows; 0 for col", "as centroids, that is % not good for fuzzy clusters.", "testing data. Use whole training data to predict Use each", "#new = f_quantileNorm(templete, target) #def f_bg_k_fo_3(X, y, k_fold=10): # '''", "= np.argsort(neighbour_col ) ix = ix[::-1] ix_top_neighbors = ix[0:N] #print", "in skf.split(X, y): # #print(\"TRAIN:\", train_index, \"TEST:\", test_index) # X_tr,", "0.3 rw = RWR(ori_graph, nSteps, lazi) # remove probability of", "in range(0, n_clusters): # for each cluster Xj = X[clus[j].flatten(),", "0.3 RWR(A, nSteps, lazi, None) ''' # test #dst =", "# aRankNet = max(aRankNet, aRankNet'); aRankNet = np.logical_or(aRankNet, aRankNet.T) #", "= list( np.where(tfs[i, :] ) [0] ) inst_clus.append(row) return inst_clus", "iteratively find the rest nodes for i in range(1, k):", "for i in ix] print(' center inst. index of each", "This version changed from the cluster-cluster dec_mat to instance-cluster #", "sim #test #sim = f_eu_dist2(X_tr, X_te) def f_fuzzy_rwr_clusters(X, k=100, each_clus_sz=None):", "than 3 clusters # % normalization take place in y_pred_self", "instance, which clusters should be used. 
dec_ixs = [] inst_clus", "= X[train_index], X[test_index] y_tr, y_te = y[train_index], y[test_index] [clus, models,", "whole data model_whole = models[-1] y_pred_all[:, 0] = f_te(X_te, model_whole)", "np.zeros(len(c1)) for i in range(0, len(c1)): lens[i] = len(c1[i]) return", "element in the % column means the probability to go", "copy to the whole array y_pred_multi[ix_other, j] = y_pred print('", "X[train_index], X[test_index] # y_tr, y_te = y[train_index], y[test_index] def f_use_each_clus_forWhole(X,", "from sklearn.ensemble import BaggingClassifier from sklearn import svm #from sklearn", "'model '+str(i)+' good ' #y_pred_te = f_tr_te(Xi, yi, X_te, model)", "A is the input net matrix, with the diag to", "ix = np.argsort(neighbour_col ) ix = ix[::-1] ix_top_neighbors = ix[0:N]", "return sim #test #sim = f_eu_dist(X) def f_eu_dist2(X1, X2): '''", "extra col is for whole pred # # # ##", "dec_ixs] = f_err_2_decMat(err_mat, tfs) def f_ICE_tr_te_all_clus(X_tr, X_te, clus, models, doNorm=True):", "= f_err_mat(X_tr, y_tr, clus, model) [dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs,", "criteria # % such as AUC, I will try using", "model_whole) #y_pred_all[:, 0] = f_tr_te(X_tr, y_tr, X_te, model) #print 'whole", "= StratifiedKFold(n_splits=k_fold) # skf.get_n_splits(X, y) # # for train_index, test_index", "dec_ixs.append([]) return [dec_mat, dec_ixs] #[dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs) def", "[y_pred, auc] # test ''' X_tr = X y_tr =", "''' The return variable clus stores the instance indices for", "used n_partial = len(clus_ids_to_use) # number of whole models used,", "range(0, N): # for each neighbour # find each neighbour's", "k_fold of inner cross-validation # #fo_inner = 10 # #", "[~, I] = sort(sim-diag(diag(sim) ) ); I = np.argsort(sim, kind='mergesort')", "than the templete. templete and target should be 1d n", "aRankNet'); aRankNet = np.logical_or(aRankNet, aRankNet.T) # remove the diagonal 1s.", "y_pred = y_pred[:, 1].flatten() # quantileNorm templete = y_pred_whole[ix_other] target", ") # bagging.fit(X_tr, y_tr) # # y_pred = bagging.predict_proba(X_te) #", "f_ICE_fit_2(X_tr, y_tr, n_clus, model, w=0.4, s=0.5): ''' This version use", "--------------------------------------------------------------- # % sort the clusters lens = f_len_of_each_ele(clus) ix", "#overlap_factor = 2; # the estimated size of each cluster.", "y): #print(\"TRAIN: \", train_index, \" TEST: \", test_index) X_tr, X_te", ":] #yi = y_tr[clus[i].flatten() ] model_i = models[i] #model_a_clust =", "0 #% 1 1 1 1 0 #% ... #%", "is wrong leading entrie cluster prediction is wrong, which is", "#if np.unique(yj).size == 1: # y_pred = np.zeros(yj.size) # y_pred.fill(np.unique(yj)[0])", "#auc = roc_auc_score(y_te.flatten(), y_pred) return y_pred def f_k_fo(X, y, model,", "% 0 0 0 0.3333 0 0 0 % 0", "range than the templete. templete and target should be 1d", "to a true-false matrix. ''' if each_clus_sz == None: #", "y_tr, n_clus, model, w, s) y_pred_ICE = f_ICE_pred(X_tr, y_tr, X_te,", "test #tfs = f_clus_to_tfs(clus, len(X)) # pass def f_tfs_2_instClus(tfs): '''", "RWR() ''' A = np.array([[0,2,2,0,0,0,0],\\ [2,0,1,1,0,0,0],\\ [2,1,0,0,1,0,0],\\ [0,1,0,0,0,1,1],\\ [0,0,1,0,0,0,0],\\ [0,0,0,1,0,0,1],\\", "X[ix_other , :] #y_other = y[ix_other ] # predict #y_pred", "# % belongs to more than one cluster. 
# %", "= model.predict_proba(X_te) y_pred = y_pred[:, 1].flatten() return y_pred def f_tr_te(X_tr,", "data model_whole = models[-1] y_pred_all[:, 0] = f_te(X_te, model_whole) #y_pred_all[:,", "y_pred_all # test #y_pred_all = f_ICE_tr_te_all_clus(X, X, clus, model) def", "model_inner.fit(X_tr, y_tr) return model_inner def f_te(X_te, model): y_pred = model.predict_proba(X_te)", "nei_labels = [] for cur_nb in range(0, N): # for", "#from joblib import Parallel, delayed #import multiprocessing def RWR(A, nSteps,", "breast cancer get 0.599 # all results are correct. def", "Matlab version ''' n_clusters = len(clus) y_pred_multi = np.zeros((y.size, n_clusters)", "wrong, which is the reason # % of instability. However,", "sklearn import tree import copy import numpy.matlib from sklearn.exceptions import", "None: p0 = np.eye(n) ''' % In the example above,", ") for i in range(0, len(X)): for j in range(i+1,", "f_ICE_tr_te_all_clus(X, X, clus, model) def f_ICE_fit(X_tr, y_tr, n_clus, model, w=0.4,", "one inst belongs to. #overlap_factor = 2; # the estimated", "label. maybe will be used later as KNN pred #", "to other nodes (aRank). ''' # delete the diagnal values.", "the values in the templete. Target may have a very", "# --------------------------------------------------------------- # % sort the clusters lens = f_len_of_each_ele(clus)", "to re-clustering ''' # rwr based fuzzy clustering clus =", "= np.zeros( len(X_te) ) for j in range(0, len(X_te) ):", "list(ix_other) #print ix_other X_other = X[ix_other , :] #y_other =", "y, clus, model) \"\"\" def f_err_2_decMat(err_mat, tfs, adv_whole=0.4, adv_self=0.5): '''", "len(X2) ): tmp = distance.euclidean(X1[i], X2[j]) sim[i][j] = tmp sim", "version # ''' # #bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \\", "of whole models used, based on parameters alpha, beta and", "model): y_pred = model.predict_proba(X_te) y_pred = y_pred[:, 1].flatten() return y_pred", "y_pred_ICE = f_ICE_pred(X_tr, y_tr, X_te, clus, dec_ixs, models,N,alpha,beta) y_preds_ICE[test_index] =", "if len(inst_i_clus) > 0: row[inst_i_clus] = row[inst_i_clus] - adv_self #print", "1].flatten() # # auc = roc_auc_score(y_te.flatten(), y_pred) # # return", "= '+str(each_clus_sz) ) # sim = squareform(pdist(X)); # sim =", "True dec_ixs.append(ix_good_clus) else: dec_ixs.append([]) return [dec_mat, dec_ixs] #[dec_mat, dec_ixs] =", "= f_len_of_each_ele(clus) ix = np.argsort(lens)[::-1] clus_ordered = [clus[i] for i", "the rank of clusters information will be lost - you", "kind='mergesort') target[ix_target] = templete[ix_templete] new = target return new #", "This algorithm will converge. For example, for the above matrix,", "Matlab version # ''' # #n_clusters = len(clus) # ##", "n_clus, model, w=0.4, s=0.5): ''' ''' # rwr based fuzzy", "find each neighbour's pick cur_nb_ix = ix_top_neighbors[cur_nb] clus_id_to_use = list(", "# corresponds to f_k_fo() # ''' # y_pred = np.zeros((y.size,", "predict the testing data. 
Use whole training data to predict", "index of each cluster: ') ixs_centers = np.array(ixs_centers) print(ixs_centers[ix]) print('", "False], # [ True, False, False, True], # [False, False,", "is a 1-dimension cell array, and each element is a", "#% 1 1 0 0 0 #% 1 1 1", "pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/' X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast cancer y", "+ 'data/res_ICE_bg_svm_py.txt' f_res = pa + 'data/res_ICE_SVM_py.txt' f = open(f_res,", "False, False, True], # [False, False, True, False]]) # #array([[False,", ") p = pnew if l2norm < np.finfo(float).eps: break else:", "y_pred_whole, model) #def f_dec_tab_4_bg_svm(X, y, clus): # ''' # Calculate", "X2): ''' calculate the euclidean distance between instances from two", "y_pred_multi[clus[j].flatten(), j] = y_pred print(' c-'+str(j)+' done predicting local instances')", "rw_mat.flatten('F') a = np.flip(np.sort(tmp), 0) len1 = len(rw_mat) #cutoffs =", "between instances from two datasets ''' sim = np.zeros(( len(X1),", "pl2norm = np.inf unchanged = 0 for i in range(1,", "j] = y_pred else: if len(yj) <= 15: print ('", "y) y_preds_ICE = np.zeros( y.size ) y_preds_whole = np.zeros( y.size", "# % 2. iteratively find the rest nodes for i", "0, n, n) will be % 0.2500 0 0 0", "0: print(' done rwr ' + str(i-1) ) pnew =", "end ) = dec_row(:, end ) - adv_whole; #dec_row(:, clus_id)", "y[train_index], y[test_index] def f_use_each_clus_forWhole(X, y, clus, y_pred_whole, model, fo_inner): '''", "data. y_pred_multi[clus[j].flatten(), j] = y_pred print(' c-'+str(j)+' done predicting local", "in range(i+1, len(X)): tmp = distance.euclidean(X[i], X[j]) sim[i][j] = tmp", "result to for each instance, what clusters it belongs to.", "= f_bg_svm_tr_te(X_tr, y_tr, X_te, y_te) ''' #def f_bg_tr_te(X_tr, y_tr, X_te,", "0 0 % 0.5000 0.2500 0 0 1.0000 0 0", "# % support more than 3 clusters # % normalization", "for j in range(i+1, len(X)): tmp = distance.euclidean(X[i], X[j]) sim[i][j]", "= np.zeros( y.size ) fold_i = 1 for train_index, test_index", "print(' done rwr ' + str(i-1) ) pnew = (1-laziness)", "1x62 double #% #% tfs e.g: #% 295x5 double #%", "in range(0, len(c1)): lens[i] = len(c1[i]) return lens def f_eu_dist(X):", "nei_labels + list( y_tr[cur_nb_ix] ) #print ' clus_ids_to_use:' #print clus_ids_to_use", "the diag to % be 1. % % for example,", "% A = [0,2,2,0,0,0,0;... % 2,0,1,1,0,0,0;... % 2,1,0,0,1,0,0;... % 0,1,0,0,0,1,1;...", "cluster prediction is wrong, which is the reason # %", "pickup some nodes as centroids, that is % not good", "default is half the number of # instances. each_clus_sz=len(X)/3 print('RWR-based", "lazi, None) ''' # test #dst = distance.euclidean(A) # corrent,", "''' convert the boolean table representation of clustering result to", "# # --------------------------- WHOLE ------------------------- # # # --------------------------- SELF", "[dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs, w, s) print (' Done", "len(yj) > 15 and np.unique(yj).size != 1: # ------------------ for", "standard, change the target to the values in the templete.", "[0.3289, -0.0170, 0.8405, 0]]) # #f_sim_2_aRankNet(sim,1) #f_sim_2_aRankNet(sim,2) #f_sim_2_aRankNet(sim,3) # #array([[False,", "aucs_whole = [] # f_res = pa + 'data/res_ICE_bg_svm_1_iter.txt' #f_res", "= None): ''' % the random walk algorithm. % A", "the top k edges. 
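# --- Added illustration (not part of the original script): a minimal sketch of
# how the clustering helpers above chain together on synthetic data. The
# function name _demo_fuzzy_rwr_clustering, the toy data and all constants in
# it are assumptions introduced here for illustration only.
def _demo_fuzzy_rwr_clustering(n_inst=60, n_feat=5, n_clus=4, seed=0):
    # build a small synthetic data matrix
    rng = np.random.RandomState(seed)
    X_demo = rng.rand(n_inst, n_feat)

    # soft RWR clustering: returns a list of index arrays, one per cluster,
    # ordered from the largest cluster to the smallest; clusters may overlap
    clus_demo = f_fuzzy_rwr_clusters(X_demo, k=n_clus)

    # report how large each (possibly overlapping) cluster is
    for ci in range(0, len(clus_demo)):
        print('demo cluster ' + str(ci) + ': ' +
              str(len(clus_demo[ci])) + ' instances')
    return clus_demo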
def f_clus_to_tfs(clus, n_inst):
    #% convert the representation from cell array to mat. But for each
    #% instance, the rank of clusters information will be lost.
    #%
    #% clus e.g:
    #% 1x5 cell
    #% 1x195 double   1x169 double   1x161 double   1x62 double   ...
    #%
    #% tfs e.g:
    #% 295x5 double
    #% 1 0 0 0 0
    #% 1 1 0 0 0
    #% 1 1 1 0 0
    #% 1 1 1 1 0
    #% 1 1 1 1 1
    #% 1 0 0 0 0
    #% ...
    tfs = np.zeros((n_inst, len(clus)), dtype=bool)
    for i in range(0, len(clus)):
        tfs[clus[i], i] = True
    return tfs

# test
#tfs = f_clus_to_tfs(clus, len(X))
# pass


def f_tfs_2_instClus(tfs):
    '''
    convert the boolean table representation of clustering result to
    for each instance, what clusters it belongs to.
    '''
    inst_clus = []
    for i in range(0, len(tfs)):
        row = list( np.where(tfs[i, :] ) [0] )
        inst_clus.append(row)
    return inst_clus

# test
#inst_clus = f_tfs_2_instClus(tfs)


def f_quantileNorm(templete, target):
    '''
    Templete is the standard, change the target to the values in the
    templete. Target may have a very different range than the templete.
    templete and target should be 1d n by 1 array.
    f_my_quantileNorm()
    '''
    ix_target = np.argsort(target, kind='mergesort')
    ix_templete = np.argsort(templete, kind='mergesort')
    target[ix_target] = templete[ix_templete]
    new = target
    return new

# test
#templete = X[:, 0]
#target = X[:, 1]
#new = f_quantileNorm(templete, target)


def f_tr(X_tr, y_tr, model):
    model_inner = copy.deepcopy(model)
    model_inner.fit(X_tr, y_tr)
    return model_inner


def f_te(X_te, model):
    y_pred = model.predict_proba(X_te)
    y_pred = y_pred[:, 1].flatten()
    return y_pred


def f_tr_te(X_tr, y_tr, X_te, model):
    '''
    corresponds to ... in Matlab version
    '''
    #bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \
    #bagging = BaggingClassifier(BaseBagging, \
    #                            random_state=None, n_estimators = 100 )
    model_inner = copy.deepcopy(model)
    model_inner.fit(X_tr, y_tr)
    y_pred = model_inner.predict_proba(X_te)
    y_pred = y_pred[:, 1].flatten()
    #auc = roc_auc_score(y_te.flatten(), y_pred)
    return y_pred


def f_k_fo(X, y, model, k_fold=10):
    '''
    corresponds to f_weka_bg_svm_arff_k_fo_3_parfor() in Matlab version
    '''
    y = y.flatten()
    y_pred = np.zeros(y.size)

    skf = StratifiedKFold(n_splits=k_fold)
    for train_index, test_index in skf.split(X, y):
        #print("TRAIN: ", train_index, " TEST: ", test_index)
        X_tr, X_te = X[train_index], X[test_index]
        #y_tr, y_te = y[train_index], y[test_index]
        y_tr = y[train_index]

        if np.unique(y_tr).size == 1:
            y_pred_fo = np.zeros( len(test_index) )
            #print len(X_te)
            #print len(test_index)
            y_pred_fo.fill(np.unique(y_tr)[0] )
        else:
            y_pred_fo = f_tr_te(X_tr, y_tr, X_te, model)

        y_pred[test_index] = y_pred_fo

    return y_pred

# test
#y_pred = f_k_fo(X, y, model, k_fold=10)
#print roc_auc_score(y.flatten(), y_pred)
# the easy dataset mesothelioma gets 1.0 CV result, breast cancer gets 0.599
# all results are correct.


def f_use_each_clus_forWhole(X, y, clus, y_pred_whole, model, fo_inner):
    '''
    % using each cluster data to predict the whole instances, while self
    % prediction using 10-fold CV. corresponds to
    % f_use_each_clus_forWhole_bg_svm() in Matlab version
    '''
    n_clusters = len(clus)
    y_pred_multi = np.zeros((y.size, n_clusters) )
    models = []

    for j in range(0, n_clusters):
        # for each cluster
        Xj = X[clus[j].flatten(), :]
        yj = y[clus[j].flatten() ]
        model_a_clust = copy.deepcopy(model)
        print(' Cluster '+str(j)+' started...')

        #if len(yj) > 10:
        if len(yj) > 15 and np.unique(yj).size != 1:
            # ------------------ for self ------------------
            try:
                y_pred = f_k_fo(Xj, yj, model, fo_inner)

                # quantileNorm
                templete = y_pred_whole[clus[j].flatten()]
                target = y_pred
                y_pred = f_quantileNorm(templete, target)

                # copy the normed prediction to the whole data.
                y_pred_multi[clus[j].flatten(), j] = y_pred
                print(' c-'+str(j)+' done predicting local instances')

                # ------------------ for other -----------------
                ix_other = set(range(0, y.size)) - set(clus[j].flatten())
                ix_other = list(ix_other)
                #print ix_other
                X_other = X[ix_other , :]
                #y_other = y[ix_other ]

                # predict
                #y_pred = f_tr_te(Xj, yj, X_other, model)
                model_a_clust.fit(Xj, yj)
                y_pred = model_a_clust.predict_proba(X_other)
                y_pred = y_pred[:, 1].flatten()

                # quantileNorm
                templete = y_pred_whole[ix_other]
                target = y_pred
                y_pred = f_quantileNorm(templete, target)

                # copy to the whole array
                y_pred_multi[ix_other, j] = y_pred
                print(' c-'+str(j)+' done predicting remote instances')
            except ValueError as e:
                print(e)
                print(' skip this cluster')
                y_pred = np.zeros(y.size)
                y_pred.fill(np.nan)
                y_pred_multi[:, j] = y_pred
        else:
            if len(yj) <= 15:
                print (' '+str(len(yj))+' insts in cluster,')
            if np.unique(yj).size == 1:
                print (' warning, #unique class label(s) == 1')
            y_pred = np.zeros(y.size)
            y_pred.fill(np.unique(yj)[0])
            y_pred_multi[:, j] = y_pred
            model_a_clust = np.unique(yj)[0]

        models.append(model_a_clust)

    return [y_pred_multi, models]

# test
#[y_pred_multi, models] = f_use_each_clus_forWhole(X, y, clus, y_pred_whole, model)


#def f_dec_tab_4_bg_svm(X, y, clus):
#    '''
#    Calculate the decision table
#    % This version changed from the cluster-cluster dec_mat to instance-cluster
#    % dec_mat. This solution will avoid the case that if one cluster decision
#    % is wrong leading entire cluster prediction is wrong, which is the reason
#    % of instability. However, we cannot use a systematic criteria
#    % such as AUC, I will try using the predicted prob at first.
#
#    % This version 3 adds the support for fuzzy clustering - one instance may
#    % belongs to more than one cluster.
#    % This updated version also changes the % y.
#    % support more than 3 clusters
#    % normalization take place in y_pred_self and y_pred_other, thus do not
#    % need normalization when predict y_pred_ICE.
#    % ixsp is another cluster form.
#    corresponds to f_dec_tab_4_bg_svm() in Matlab version
#    '''
#    #n_clusters = len(clus)
#    ## dec_mat stores the prediction error.
#    #pred_mat=np.zeros((y.size, n_clusters+1)) #the extra col is for whole pred
#    ## k_fold of inner cross-validation
#    #fo_inner = 10
#    # --------------------------- WHOLE -------------------------
#    # --------------------------- SELF -------------------------


def f_err_mat(X, y, clus, model):
    '''
    Calculate the decision table
    corresponds to f_dec_tab_4_bg_svm() in Matlab version
    '''
    n_clusters = len(clus)
    ## dec_mat stores the prediction error.
    pred_prob_mat=np.zeros((y.size, n_clusters+1)) #the extra col is for whole pred
    # col 0 to col n_clusters-1 store the predictions by each cluster
    # the last col stores the pred by whole data
    #models = []

    # k_fold of inner cross-validation
    fo_inner = 5

    # --------------------------- WHOLE -------------------------
    # Predict each cluster using the whole data.
    model_whole = copy.deepcopy(model)
    y_pred_whole = f_k_fo(X, y, model_whole, fo_inner)
    model_whole.fit(X, y) # fit a model using all data rather than only a fold
    pred_prob_mat[:, n_clusters] = y_pred_whole
    print (' Start to evaluate each cluster ')

    # --------------------------- SELF -------------------------
    # predict the whole instances using each cluster data, while self
    # % prediction using 10-fold CV.
    [y_pred_multi, models] = f_use_each_clus_forWhole(X, y, clus, \
                                 y_pred_whole, model, fo_inner)
    print (' Done evaluation using each cluster')

    models.append(model_whole)
    pred_prob_mat[:, 0:n_clusters] = y_pred_multi

    # make a tmp array a stores y
    tmp = np.matlib.repmat(y.reshape((y.size, 1)), 1, n_clusters+1)
    err_mat = abs(pred_prob_mat - tmp )

    print (' Done calculating error table and fitting ICE models')
    return [err_mat, models]

"""
# test
clus = f_fuzzy_rwr_clusters(X, n_clus)
tfs = f_clus_to_tfs(clus, len(X))

y = y.astype(float)
#model = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \
#model = BaggingClassifier(base_estimator = svm.LinearSVR(), \
#model = BaggingClassifier(base_estimator = svm.LinearSVC(), \
model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
                          random_state=None, n_estimators = 100 )

[err_mat, models] = f_err_mat(X, y, clus, model)
"""


def f_err_2_decMat(err_mat, tfs, adv_whole=0.4, adv_self=0.5):
    '''
    Convert the err table to decision table.
    '''
    dec_mat = np.zeros(( len(err_mat), err_mat[0].size-1 ), dtype=bool)

    # dec_ixs: for each instance, which clusters should be used.
    dec_ixs = []
    inst_clus = f_tfs_2_instClus(tfs)

    for i in range(0, len(err_mat)):
        # Matlab code:
        #dec_row = dec_mat(cur_nb_ix, :);
        #dec_row(:, end ) = dec_row(:, end ) - adv_whole;
        #dec_row(:, clus_id) = dec_row(:, clus_id) - adv_self;
        row = np.copy( err_mat[i, :] )
        row[-1] = row[-1] - adv_whole

        inst_i_clus = inst_clus[i]
        if len(inst_i_clus) > 0:
            row[inst_i_clus] = row[inst_i_clus] - adv_self

        ix_good_clus = list( np.where( row < row[-1] ) [0] )
        if len(ix_good_clus) > 0:
            dec_mat[i, ix_good_clus] = True
            dec_ixs.append(ix_good_clus)
        else:
            dec_ixs.append([])

    return [dec_mat, dec_ixs]

# test
#[dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs)
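# --- Added illustration (not part of the original script): a tiny hand-made
# error matrix showing how f_err_2_decMat() marks a cluster as "good" for an
# instance only when its error beats the whole-data error by the advantage
# margins adv_whole / adv_self. The name _demo_err_2_decMat and the values are
# assumptions introduced here for illustration only.
def _demo_err_2_decMat():
    # three instances, two clusters plus the whole-data column (last col)
    err_demo = np.array([[0.10, 0.60, 0.40],
                         [0.55, 0.20, 0.30],
                         [0.50, 0.50, 0.20]])
    # instance 0 belongs to cluster 0, instance 1 to cluster 1,
    # instance 2 to both clusters
    tfs_demo = np.array([[True, False],
                         [False, True],
                         [True, True]])
    dec_mat_demo, dec_ixs_demo = f_err_2_decMat(err_demo, tfs_demo,
                                                adv_whole=0.4, adv_self=0.5)
    print(dec_mat_demo)   # which clusters are trusted for each instance
    print(dec_ixs_demo)   # the same information as index lists
    return dec_mat_demo, dec_ixs_demo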
each_clus_sz=len(X)/3 print('RWR-based fuzzy clustering starts...') print(' NO.", "0 0.5000 % 0 0 0 0.3333 0 0.5000 0", "# predict the whole instances using each cluster data, while", "to f_use_each_clus_forWhole_bg_svm() in Matlab version ''' n_clusters = len(clus) y_pred_multi", "p # test RWR() ''' A = np.array([[0,2,2,0,0,0,0],\\ [2,0,1,1,0,0,0],\\ [2,1,0,0,1,0,0],\\", "1].flatten() #auc = roc_auc_score(y_te.flatten(), y_pred) return y_pred def f_k_fo(X, y,", "target) # copy the normed prediction to the whole data.", "print (' Done calucating decision table') return [clus, models, dec_ixs]", "belongs to more than one cluster. # % This updated", "cluster size = '+str(each_clus_sz) ) # sim = squareform(pdist(X)); #", "1].flatten() return y_pred def f_tr_te(X_tr, y_tr, X_te, model): ''' corresponds", "same information that clus is the instances ids for each", "0 0 0.3333 0 0 0.5000 % 0 0 0", "1 for train_index, test_index in skf.split(X, y): # print(\"TRAIN:\", train_index,", "each cluster. However, this data structure is not easy to", "#dec_row = dec_mat(cur_nb_ix, :); #dec_row(:, end ) = dec_row(:, end", "0.017, 0.035, 0.014, 0.154, 0.009, 0.425, 0.203;... % 0.017, 0.035,", "p) ** 2) ) ) p = pnew if l2norm", "''' sim = np.zeros(( len(X), len(X) )) for i in", "#model = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \\ # random_state=None, n_estimators =", "[0,0,0,1,0,0,1],\\ [0,0,0,1,0,1,0]]) nSteps = 1000 lazi = 0.3 RWR(A, nSteps,", "0.167, 0.463, 0.052, 0.324, 0.028, 0.028;... % 0.048, 0.099, 0.039,", "nSteps = 1000 lazi = 0.3 RWR(A, nSteps, lazi, None)", ") b = np.matlib.repmat(a, 100, 1) ct = getCutoff(b, 70)", "= tree.DecisionTreeClassifier(), \\ # random_state=None, n_estimators = 100 ) #y_pred", "# y_pred = np.zeros(X_other.size) # y_pred.fill(np.unique(yj)[0]) # copy to the", "Start to evaluate each cluster ') # --------------------------- SELF -------------------------", "err_mat[0].size-1 ), dtype=bool) # dec_ixs: for each instance, which clusters", "each cluster. default is half the number of # instances.", "= unchanged +1 if unchanged > 10: break else: unchanged", "y_pred_te = np.zeros(len(X_te)) y_pred_te.fill(np.nan) #except NotFittedError as e: # print(repr(e))", "k_fold of inner cross-validation fo_inner = 5 # --------------------------- WHOLE", "BaggingClassifier(base_estimator = svm.LinearSVC(), \\ # bagging = BaggingClassifier(BaseBagging, \\ #", "y_tr.astype(float) #model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \\ # random_state=None, n_estimators", "prediction error. 
# #pred_mat=np.zeros((y.size, n_clusters+1)) #the extra col is for", "np.zeros((n_inst, len(clus)), dtype=bool) for i in range(0, len(clus)): tfs[clus[i], i]", "= X y_te = y [y_pred, auc] = f_bg_svm_tr_te(X_tr, y_tr,", "#aRankNet = (I2 >length(sim)-k); aRankNet = I2 > (len(sim) -", "thus do not # % need normalization when predict y_pred_ICE.", "store the predictions by each cluster # the last col", "y_pred = np.zeros(yj.size) # y_pred.fill(np.unique(yj)[0]) #else: try: y_pred = f_k_fo(Xj,", "def f_ICE_pred(X_tr, y_tr, X_te, clus, dec_ixs, models,N=5,alpha=1,beta=1): ''' clus and", "# find each neighbour's pick cur_nb_ix = ix_top_neighbors[cur_nb] clus_id_to_use =", "(' Done calucating decision table') return [clus, models, dec_ixs] def", "# test #pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/' ##X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast", "np.zeros(len(X_te)) # y_pred_te.fill(np.nan) #print 'model '+str(i)+' good ' #y_pred_te =", "1x169 double 1x161 double 1x62 double #% #% tfs e.g:", "0] target = y_pred_te y_pred = f_quantileNorm(templete, target) else: y_pred", "'/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/' # pa = '/Users/zg/Dropbox/bio/ICE_2018/' # pa = './' pa", "% 100, 1000 or 10000, will give the same result.", "1 #% 1 0 0 0 0 #% 1 1", "y) # fit a model using all data rather than", "= np.inf # % find the farthest node ix =", "rather than only a fold pred_prob_mat[:, n_clusters] = y_pred_whole print", "Use whole training data to predict Use each cluster of", "# pa = '/Users/zg/Dropbox/bio/ICE_2018/' # pa = './' pa =", "X_te = X[train_index], X[test_index] y_tr, y_te = y[train_index], y[test_index] [clus,", "similarity matrix to a network graph where each node %", "0) rw_mat = rw print(' done RWR') # --------------------------------------------------------------- ixs_centers", "[clus[i] for i in ix] print(' center inst. index of", "using 10-fold CV. [y_pred_multi, models] = f_use_each_clus_forWhole(X, y, clus, \\", "0) len1 = len(rw_mat) #cutoffs = [] all_neibs = int(", "# % of instability. However, we cannot use a systematic", "\"\"\" def f_err_2_decMat(err_mat, tfs, adv_whole=0.4, adv_self=0.5): ''' Convert the err", "1000 lazi = 0.3 RWR(A, nSteps, lazi, None) ''' #", "a diagonal matrix # aRankNet = max(aRankNet, aRankNet'); aRankNet =", "print(' done calculating the A-rank KNN graph') # % --------", "dtype=bool) # dec_ixs: for each instance, which clusters should be", "j in range(0, n_clusters): # for each cluster Xj =", "= np.zeros(( len(err_mat), err_mat[0].size-1 ), dtype=bool) # dec_ixs: for each", "import BaggingClassifier from sklearn import svm #from sklearn import metrics", ") p = p0 pl2norm = np.inf unchanged = 0", "fit a model using all data rather than only a", "-*- coding: utf-8 -*- \"\"\" Created on Mon Mar 5", "******************** n_iter = 2 for i in range(0, n_iter): clus", "---------- #n_partials = np.zeros( len(X_te) ) #n_wholes = np.zeros( len(X_te)", "= scipy.io.loadmat(pa+'/data/data_all_pickle/37/data.mat')['y'] #imgplot = plt.imshow(ori_graph, interpolation='nearest', aspect='auto') #plt.show() #sim =", "it belongs to. #% #% clus e.g: #% 1x5 cell", "node. % This algorithm will converge. 
For example, for the", "0 0 0 0 #% 1 1 1 1 0", "corresponds to f_k_fo() # ''' # y_pred = np.zeros((y.size, 1))", "col is the prediction using the whole data # start", "for each neighbour # find each neighbour's pick cur_nb_ix =", "and laziness is 0.3, p0 is default, the result is:", "another cluster form. # # corresponds to f_dec_tab_4_bg_svm() in Matlab", "to more than one cluster. # % This updated version", "coding: utf-8 -*- \"\"\" Created on Mon Mar 5 05:47:03", "predict y_pred_ICE. # % ixsp is another cluster form. #", "\\ y_pred_whole, model, fo_inner) print (' Done evaluation using each", "self % prediction using 10-fold CV. corresponds to f_use_each_clus_forWhole_bg_svm() in", "y_tr, X_te, y_te): # #bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \\", "the same information that clus is the instances ids for", ") clus_ids_to_use = clus_ids_to_use + [0] * n_whole #print '", "Convert the similarity matrix to a network graph where each", "while self # prediction using 10-fold CV. [y_pred_multi, models] =", "roc_auc_score(y_te.flatten(), y_pred) # # return [y_pred, auc] def f_tr(X_tr, y_tr,", "graph') # % -------- RWR -------- nSteps = 1000 lazi", "data to predict the whole instances, while self % prediction", "in Matlab version ''' #bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \\", "the whole data. y_pred_multi[clus[j].flatten(), j] = y_pred print(' c-'+str(j)+' done", "array a stores y tmp = np.matlib.repmat(y.reshape((y.size, 1)), 1, n_clusters+1)", "Matlab version # corresponds to f_k_fo() # ''' # y_pred", "metrics from sklearn.metrics import roc_auc_score from sklearn import tree import", "-sim np.fill_diagonal(sim, 0) return sim #test #sim = f_eu_dist(X) def", "Done calucating decision table') return [clus, models, dec_ixs] def f_ICE_pred(X_tr,", "self # prediction using 10-fold CV. [y_pred_multi, models] = f_use_each_clus_forWhole(X,", "-sim; sim = np.zeros((len(X), len(X) ) ) for i in", "Matlab version ''' #bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \\ #bagging", "[0.6448, -0.0842, 0, 0.8405], \\ # [0.3289, -0.0170, 0.8405, 0]])", "model, w=0.4, s=0.5): ''' ''' # rwr based fuzzy clustering", "#!/usr/bin/env python2 # -*- coding: utf-8 -*- \"\"\" Created on", "neighbors' picks ---------- clus_ids_to_use = [] nei_labels = [] for", "nei_labels y_pred_ICE[j] = np.nanmean(y_pred_all[j, clus_ids_to_use]) print ('Done predicting testing instances.')", "wrong leading entrie cluster prediction is wrong, which is the", "dec_ixs] = f_err_2_decMat(err_mat, tfs, w, s) print (' Done calucating", "= sort(I); I2 = (np.argsort(I, kind='mergesort').T + 1).T # for", "algorithm. % A is the input net matrix, with the", "np.array(a) + 1 ) clus_ids_to_use = a # number of", "first. # # % This version 3 adds the support", "tmp sim = -sim return sim #test #sim = f_eu_dist2(X_tr,", "np.unique(yj).size == 1: # y_pred = np.zeros(yj.size) # y_pred.fill(np.unique(yj)[0]) #else:", "col n_clusters-1 store the predictions by each cluster # the", "i in range(0, len(err_mat)): # Matlab code: #dec_row = dec_mat(cur_nb_ix,", "np.array(ixs_centers) print(ixs_centers[ix]) print(' size of each cluster: ') print(lens[ix]) print('", "from sklearn.metrics import roc_auc_score from sklearn import tree import copy", "== None: # on average, how many clusters does one", "sort(sim-diag(diag(sim) ) ); I = np.argsort(sim, kind='mergesort') + 1 #", "1: print (' warning, #unique class label(s) == 1') y_pred", "= 2; # the estimated size of each cluster. 
default", "0, n, n); #W = spdiags(np.power(sum(np.float64(A)) , -1).T , 0,", "p = pnew if l2norm < np.finfo(float).eps: break else: if", "neighbor's label. maybe will be used later as KNN pred", "0.425, 0.167, 0.132, 0.117, 0.071, 0.071;... % 0.220, 0.167, 0.463,", "[] all_neibs = int( avgNeighborsSize * len1 ) print( all_neibs)", "1: # ------------------ for self ------------------ #if np.unique(yj).size == 1:", "svm.SVC(kernel='linear'), \\ random_state=None, n_estimators = 100 ) [err_mat, models] =", "testing instances.') return y_pred_ICE # test # pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/'", "axis=1) # axis=1 for rows; 0 for col # %", "False, False, True], # [ True, False, False, True], #", "= f_use_each_clus_forWhole(X, y, clus, y_pred_whole, model) #def f_dec_tab_4_bg_svm(X, y, clus):", "= np.zeros((y.size, 1)) # # skf = StratifiedKFold(n_splits=k_fold) # skf.get_n_splits(X,", "= [] # k_fold of inner cross-validation fo_inner = 5", "dec_ixs] #def_deal_miss_v_1(d): ''' deal with missing values by replacing them", "ix of top neighbors:' #print ix_top_neighbors # ---------- find all", "max(np.sqrt(sum((pnew - p) ** 2) ) ) p = pnew", "skf.split(X, y): # #print(\"TRAIN:\", train_index, \"TEST:\", test_index) # X_tr, X_te", "= np.zeros(len(X_te)) # y_pred_te.fill(np.nan) #print 'model '+str(i)+' good ' #y_pred_te", "def getCutoff(rw_mat, avgNeighborsSize): tmp = rw_mat.flatten('F') a = np.flip(np.sort(tmp), 0)", "find all neighbors' picks ---------- clus_ids_to_use = [] nei_labels =", "0.203;... % 0.017, 0.035, 0.014, 0.154, 0.009, 0.203, 0.425] %", "for each cluster Xj = X[clus[j].flatten(), :] yj = y[clus[j].flatten()", "nodes (aRank). ''' # delete the diagnal values. # sim", "range(i+1, len(X)): tmp = distance.euclidean(X[i], X[j]) sim[i][j] = tmp sim[j][i]", "print (auc_ICE, auc_whole) aucs_ICE.append(auc_ICE) aucs_whole.append(auc_whole) f.write(str(j) + '\\t' + str(auc_ICE)", "entrie cluster prediction is wrong, which is the reason #", "WHOLE ------------------------- # Predict each cluster using the whole data.", "* spdiags(sum(A)'.^(-1), 0, n, n); #W = spdiags(np.power(sum(np.float64(A)) , -1).T", "def RWR(A, nSteps, laziness, p0 = None): ''' % the", "range(1, nSteps+1): if i % 100 == 0: print(' done", "= BaggingClassifier(BaseBagging, \\ # random_state=None, n_estimators = 100 ) model_inner", "'\\t' + str(auc_ICE) + ' \\t ' + str(auc_whole) +", "> 10: if len(yj) > 15 and np.unique(yj).size != 1:", "adds the support for fuzzy clustering - one instance may", "each cluster data, while self # prediction using 10-fold CV.", "error-dicision tables y_tr = y_tr.astype(float) #model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'),", "else: unchanged = 0 pl2norm = l2norm return p #", "0 0 0 0.3333 0 0 0 % 0 0", "len(yj) > 10: if len(yj) > 15 and np.unique(yj).size !=", "#model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \\ # random_state=None, n_estimators =", "each test instance neighbour_col = neighbour_mat[:, j].flatten() ix = np.argsort(neighbour_col", "try: y_pred_te = f_te(X_te, model_i) except : if model_i ==", "by replacing them by mean. 
def getCutoff(rw_mat, avgNeighborsSize):
    # pick the similarity cutoff so that, on average, each node keeps
    # avgNeighborsSize neighbours.
    tmp = rw_mat.flatten('F')
    a = np.flip(np.sort(tmp), 0)
    len1 = len(rw_mat)
    all_neibs = int( avgNeighborsSize * len1 )
    print( all_neibs)
    ct = a[all_neibs]
    return ct
#test
#>>> a = np.array([[1,2], [3,4]])
#>>> a.flatten()
#array([1, 2, 3, 4])
#>>> a.flatten('F')
#array([1, 3, 2, 4])
'''
a = np.array( range(0,100) )
b = np.matlib.repmat(a, 100, 1)
ct = getCutoff(b, 70)
'''


def f_len_of_each_ele(c1):
    #% Assume c1 is a 1-dimension cell array, and each element is a 1d double
    #% array. This function counts the length of each double array.
    lens = np.zeros(len(c1))
    for i in range(0, len(c1)):
        lens[i] = len(c1[i])
    return lens


def f_eu_dist(X):
    '''
    calculate the euclidean distance between instances
    '''
    sim = np.zeros(( len(X), len(X) ) )
    for i in range(0, len(X)):
        for j in range(i+1, len(X)):
            tmp = distance.euclidean(X[i], X[j])
            sim[i][j] = tmp
            sim[j][i] = tmp
    sim = -sim
    np.fill_diagonal(sim, 0)
    return sim
#test
#sim = f_eu_dist(X)


def f_eu_dist2(X1, X2):
    '''
    calculate the euclidean distance between instances from two datasets
    '''
    sim = np.zeros(( len(X1), len(X2) ) )
    for i in range(0, len(X1) ):
        for j in range(0, len(X2) ):
            tmp = distance.euclidean(X1[i], X2[j])
            sim[i][j] = tmp
    sim = -sim
    return sim
#test
#sim = f_eu_dist2(X_tr, X_te)
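# sketch added for illustration (toy points): f_eu_dist() returns *negated*
# distances, so larger values mean more similar instances, and the diagonal is
# zeroed for the aRank step below.
'''
X_toy = np.array([[0., 0.], [0., 1.], [3., 4.]])
S = f_eu_dist(X_toy)
print(S[0, 1], S[0, 2])      # -1.0 and -5.0
print(S[0, 1] > S[0, 2])     # True: instance 1 is closer to 0 than instance 2
'''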
# test
#dst = distance.euclidean(A)
# correct, the same as in Matlab


def f_sim_2_aRankNet(sim, k=3):
    '''
    % Convert the similarity matrix to a network graph where each node
    % has k edges to other nodes (aRank).
    '''
    # delete the diagonal values.
    # sim = sim-diag(diag(sim) );
    np.fill_diagonal(sim, 0)

    # [~, I] = sort(sim-diag(diag(sim) ) );
    I = np.argsort(sim, kind='mergesort') + 1

    # [~, I2] = sort(I);
    I2 = (np.argsort(I, kind='mergesort').T + 1).T

    # for each column, just keep the top k edges.
    #aRankNet = (I2 >length(sim)-k);
    aRankNet = I2 > (len(sim) - k)

    # make the matrix symmetric
    # aRankNet = max(aRankNet, aRankNet');
    aRankNet = np.logical_or(aRankNet, aRankNet.T)

    # remove the diagonal 1s.
    # aRankNet = aRankNet-diag(diag(aRankNet) );
    np.fill_diagonal(aRankNet, False)

    return aRankNet
# test
#sim = np.array([[0, 0.5566, 0.6448, 0.3289], \
#                [0.5566, 0, -0.0842, -0.0170], \
#                [0.6448, -0.0842, 0, 0.8405], \
#                [0.3289, -0.0170, 0.8405, 0]])
#
#f_sim_2_aRankNet(sim,1)
#f_sim_2_aRankNet(sim,2)
#f_sim_2_aRankNet(sim,3)
#
#array([[False,  True,  True, False],
#       [ True, False, False, False],
#       [ True, False, False,  True],
#       [False, False,  True, False]])
#
#array([[False,  True,  True,  True],
#       [ True, False, False, False],
#       [ True, False, False,  True],
#       [ True, False,  True, False]])
#
#array([[False,  True,  True,  True],
#       [ True, False, False,  True],
#       [ True, False, False,  True],
#       [ True,  True,  True, False]])
def f_fuzzy_rwr_clusters(X, k=100, each_clus_sz=None):
    # X: data
    # k: number of clusters
    '''
    The return variable clus stores the instance indices for each cluster.
    However, this data structure makes it hard to look up, for one instance,
    which clusters it belongs to; thus we also need to convert clus to a
    true-false matrix (see f_clus_to_tfs() ).
    '''
    if each_clus_sz is None:
        # on average, how many clusters does one inst belong to.
        #overlap_factor = 2;
        # the estimated size of each cluster. default is a third of the number
        # of instances.
        each_clus_sz = len(X)/3

    print('RWR-based fuzzy clustering starts...')
    print('  NO. clusters = '+str(k)+'; avg. cluster size = '+str(each_clus_sz) )

    # sim = squareform(pdist(X));
    # sim = -sim;
    sim = f_eu_dist(X)
    print('  done calculating the Euclidean distance matrix')

    # ---------------------------------------------------------------
    aRank_k_neighbors = np.ceil(np.log10(len(sim)) )
    ori_graph = f_sim_2_aRankNet(sim, aRank_k_neighbors)
    print('  done calculating the A-rank KNN graph')

    # % -------- RWR --------
    nSteps = 1000
    lazi = 0.3
    rw = RWR(ori_graph, nSteps, lazi)

    # remove the probability of returning to the start node
    np.fill_diagonal(rw, 0)
    rw_mat = rw
    print('  done RWR')

    # ---------------------------------------------------------------
    ixs_centers = f_find_centers_rwMat(rw_mat, k)
    ct = getCutoff(rw_mat, each_clus_sz)
    rw_net = rw_mat > ct
    # % set the diagonal to 1
    np.fill_diagonal(rw_net, True)
    clus = []
    for i in range(0, k):
        tmp = np.argwhere(rw_net[:, ixs_centers[i] ] ).flatten()
        clus.append(tmp)

    # ---------------------------------------------------------------
    # % sort the clusters, largest first
    lens = f_len_of_each_ele(clus)
    ix = np.argsort(lens)[::-1]
    clus_ordered = [clus[i] for i in ix]
    print('  center inst. index of each cluster: ')
    ixs_centers = np.array(ixs_centers)
    print(ixs_centers[ix])
    print('  size of each cluster: ')
    print(lens[ix])
    print('  done RWR clustering')
    return clus_ordered
#test
#pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/'
#X = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['X'] # 11:mesothelioma
#y = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['y']
#n_clus = 3
#clus = f_fuzzy_rwr_clusters(X, n_clus)


def f_find_centers_rwMat(rw_mat, k):
    '''
    % on the rw_mat matrix, find some nodes as the centroids for soft
    % clustering. If we just randomly pick up some nodes as centroids, that is
    % not good for fuzzy clusters.
    % k is the number of centroids.
    '''
    ixs = []
    # 1. find the most connected center node as the first centroid.
    a = np.sum(rw_mat, axis=1) # axis=1 for rows; 0 for col
    ix = np.argmax(a)
    ixs.append(ix)

    # 2. find the rest of the nodes
    for i in range(1, k):
        tmp = rw_mat[:, ixs]
        b = np.sum(tmp, axis=1)
        b[ixs] = np.inf
        # find the farthest node
        ix = np.argmin(b)
        ixs.append(ix)
    return ixs


def f_clus_to_tfs(clus, n_inst):
    #% convert the cluster information from cell array to mat. But for each
    #% instance, the rank of clusters information will be lost - you won't know
    #% what is the top 1/2/3 cluster it belongs to.
    #%
    #% clus e.g:
    #% 1x5 cell
    #% 1x195 double  1x193 double  1x169 double  1x161 double  1x62 double
    #%
    #% tfs e.g:
    #% 295x5 double
    #% 1 0 0 0 0
    #% 1 1 0 0 0
    #% 1 1 1 0 0
    #% 1 1 1 1 0
    #% 1 1 1 1 1
    #% ...
    tfs = np.zeros((n_inst, len(clus)), dtype=bool)
    for i in range(0, len(clus)):
        tfs[clus[i], i] = True
    return tfs
# test
#tfs = f_clus_to_tfs(clus, len(X)) # pass


def f_tfs_2_instClus(tfs):
    '''
    convert the boolean table representation of the clustering result to, for
    each instance, what clusters it belongs to.
    '''
    inst_clus = []
    for i in range(0, len(tfs)):
        row = list( np.where(tfs[i, :] ) [0] )
        inst_clus.append(row)
    return inst_clus
# test
#inst_clus = f_tfs_2_instClus(tfs)
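# pipeline sketch added for illustration: chaining the clustering helpers on a
# small random matrix (X_toy and the sizes here are made up for the example).
'''
np.random.seed(0)
X_toy = np.random.rand(30, 4)
clus = f_fuzzy_rwr_clusters(X_toy, k=3, each_clus_sz=10)
tfs = f_clus_to_tfs(clus, len(X_toy))      # 30 x 3 boolean membership table
inst_clus = f_tfs_2_instClus(tfs)          # per-instance cluster id lists
print(f_len_of_each_ele(clus))             # cluster sizes, largest first
'''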
#def f_bg_svm_tr_te(X_tr, y_tr, X_te, y_te):
#    #bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \
#    bagging = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \
#                                random_state=None, n_estimators = 100 )
#    bagging.fit(X_tr, y_tr)
#
#    y_pred = bagging.predict_proba(X_te)
#    y_pred = y_pred[:, 1].flatten()
#
#    auc = roc_auc_score(y_te.flatten(), y_pred)
#
#    return [y_pred, auc]
# test
'''
X_tr = X
y_tr = y
X_te = X
y_te = y
[y_pred, auc] = f_bg_svm_tr_te(X_tr, y_tr, X_te, y_te)
'''

#def f_bg_tr_te(X_tr, y_tr, X_te, y_te, BaseBagging):
#    '''
#    corresponds to f_weka_bg_svm_tr_te() in Matlab version
#    '''
#    bagging = BaggingClassifier(BaseBagging, \
#                                random_state=None, n_estimators = 100 )
#    bagging.fit(X_tr, y_tr)
#
#    y_pred = bagging.predict_proba(X_te)
#    y_pred = y_pred[:, 1].flatten()
#
#    auc = roc_auc_score(y_te.flatten(), y_pred)
#
#    return [y_pred, auc]


def f_tr(X_tr, y_tr, model):
    model_inner = copy.deepcopy(model)
    model_inner.fit(X_tr, y_tr)
    return model_inner


def f_te(X_te, model):
    y_pred = model.predict_proba(X_te)
    y_pred = y_pred[:, 1].flatten()
    return y_pred


def f_tr_te(X_tr, y_tr, X_te, model):
    '''
    corresponds to f_weka_bg_svm_tr_te() in Matlab version
    '''
    #bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \
    #bagging = BaggingClassifier(BaseBagging, \
    #                            random_state=None, n_estimators = 100 )
    model_inner = copy.deepcopy(model)
    model_inner.fit(X_tr, y_tr)
    y_pred = model_inner.predict_proba(X_te)
    y_pred = y_pred[:, 1].flatten()
    #auc = roc_auc_score(y_te.flatten(), y_pred)
    return y_pred


def f_k_fo(X, y, model, k_fold=10):
    '''
    corresponds to f_weka_bg_svm_arff_k_fo_3_parfor() in Matlab version
    '''
    y = y.flatten()
    y_pred = np.zeros(y.size)

    skf = StratifiedKFold(n_splits=k_fold, random_state=None, shuffle=True)
    skf.get_n_splits(X, y)

    for train_index, test_index in skf.split(X, y):
        #print("TRAIN: ", train_index, " TEST: ", test_index)
        X_tr, X_te = X[train_index], X[test_index]
        #y_tr, y_te = y[train_index], y[test_index]
        y_tr = y[train_index]

        if np.unique(y_tr).size == 1:
            # the fold contains a single class; fall back to that label.
            y_pred_fo = np.zeros( len(test_index) )
            y_pred_fo.fill(np.unique(y_tr)[0] )
        else:
            y_pred_fo = f_tr_te(X_tr, y_tr, X_te, model)
        y_pred[test_index] = y_pred_fo

    #auc = roc_auc_score(y.flatten(), y_pred)
    return y_pred
# test
#pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/'
##X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast cancer
##y = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['y']
#X = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['X'] # 11:mesothelioma
#y = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['y']
#
#model = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \
#                          random_state=None, n_estimators = 100 )
#y_pred = f_k_fo(X, y, model, k_fold=10)
#
#print roc_auc_score(y.flatten(), y_pred)
# the easy dataset mesothelioma gets a 1.0 CV result.
# breast cancer gets 0.599
# all results are correct.


def f_quantileNorm(templete, target):
    '''
    Templete is the standard; change the target to the values in the templete.
    Target may have a very different range than the templete.
    templete and target should be 1d n by 1 arrays.
    f_my_quantileNorm()
    '''
    ix_target = np.argsort(target, kind='mergesort')
    ix_templete = np.argsort(templete, kind='mergesort')
    target[ix_target] = templete[ix_templete]
    new = target
    return new
#test
#templete = X[:, 0]
#target = X[:, 1]
#new = f_quantileNorm(templete, target)


#def f_bg_k_fo_3(X, y, k_fold=10):
#    '''
#    corresponds to f_weka_bgSvm_arff_k_fo_3_parfor() in Matlab version
#    corresponds to f_k_fo()
#    '''
#    y_pred = np.zeros((y.size, 1))
#
#    skf = StratifiedKFold(n_splits=k_fold)
#    skf.get_n_splits(X, y)
#
#    for train_index, test_index in skf.split(X, y):
#        #print("TRAIN:", train_index, "TEST:", test_index)
#        X_tr, X_te = X[train_index], X[test_index]
#        y_tr, y_te = y[train_index], y[test_index]
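# sketch added for illustration: out-of-fold predictions with f_k_fo() on a
# small synthetic problem (make_classification is only for this demo).
'''
from sklearn.datasets import make_classification
X_d, y_d = make_classification(n_samples=200, n_features=10, random_state=0)
model_d = svm.SVC(kernel='linear', probability=True)
y_oof = f_k_fo(X_d, y_d.astype(float), model_d, k_fold=10)
print(roc_auc_score(y_d, y_oof))
'''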
def f_use_each_clus_forWhole(X, y, clus, y_pred_whole, model, fo_inner):
    '''
    % using each cluster's data to predict the whole instances, while self-
    % prediction uses k-fold CV.
    corresponds to f_use_each_clus_forWhole_bg_svm() in Matlab version
    '''
    n_clusters = len(clus)
    y_pred_multi = np.zeros((y.size, n_clusters) )
    models = []

    for j in range(0, n_clusters): # for each cluster
        Xj = X[clus[j].flatten(), :]
        yj = y[clus[j].flatten() ]
        model_a_clust = copy.deepcopy(model)
        print('  Cluster '+str(j)+' started...')

        #if len(yj) > 10:
        if len(yj) > 15 and np.unique(yj).size != 1:
            # ------------------ for self ------------------
            #if np.unique(yj).size == 1:
            #    y_pred = np.zeros(yj.size)
            #    y_pred.fill(np.unique(yj)[0])
            #else:
            try:
                y_pred = f_k_fo(Xj, yj, model, fo_inner)

                # quantileNorm
                templete = y_pred_whole[clus[j].flatten()]
                target = y_pred
                y_pred = f_quantileNorm(templete, target)

                # copy the normed prediction to the whole data.
                y_pred_multi[clus[j].flatten(), j] = y_pred
                print('    c-'+str(j)+' done predicting local instances')

                # ------------------ for other -----------------
                ix_other = set(range(0, y.size)) - set(clus[j].flatten())
                ix_other = list(ix_other)
                X_other = X[ix_other , :]
                #y_other = y[ix_other ]

                #y_pred = f_tr_te(Xj, yj, X_other, model)
                #if np.unique(yj).size != 1:
                model_a_clust.fit(Xj, yj)
                y_pred = model_a_clust.predict_proba(X_other)
                y_pred = y_pred[:, 1].flatten()

                # quantileNorm
                templete = y_pred_whole[ix_other]
                target = y_pred
                y_pred = f_quantileNorm(templete, target)
                #else:
                #    y_pred = np.zeros(X_other.size)
                #    y_pred.fill(np.unique(yj)[0])

                # copy to the whole array
                y_pred_multi[ix_other, j] = y_pred
                print('    c-'+str(j)+' done predicting remote instances')
            except ValueError as e:
                print(e)
                print('    skip this cluster')
                y_pred = np.zeros(y.size)
                y_pred.fill(np.nan)
                y_pred_multi[:, j] = y_pred
        else:
            if len(yj) <= 15:
                print ('    '+str(len(yj))+' insts in cluster, <= 15, skip...')
                y_pred = np.zeros(y.size)
                y_pred.fill(np.nan)
                y_pred_multi[:, j] = y_pred
            if np.unique(yj).size == 1:
                print ('    warning, #unique class label(s) == 1')
                y_pred = np.zeros(y.size)
                y_pred.fill(np.unique(yj)[0])
                y_pred_multi[:, j] = y_pred
                model_a_clust = np.unique(yj)[0]
        models.append(model_a_clust)
    return [y_pred_multi, models]
# test
#[y_pred_multi, models] = f_use_each_clus_forWhole(X, y, clus, y_pred_whole, model)
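# note added for illustration: clusters skipped above contribute NaN columns,
# which np.nanmean() later ignores when the per-instance predictions are
# averaged in f_ICE_pred().
'''
col = np.array([0.8, np.nan, 0.6])
print(np.nanmean(col))   # 0.7 -- the NaN entry is dropped, not zeroed
'''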
# --------------------------- SELF -------------------------
def f_err_mat(X, y, clus, model):
    '''
    Calculate the error table.
    corresponds to f_dec_tab_4_bg_svm() in Matlab version
    '''
    n_clusters = len(clus)
    # err_mat stores the prediction error.
    pred_prob_mat = np.zeros((y.size, n_clusters+1)) #the extra col is for whole pred
    # col 0 to col n_clusters-1 store the predictions by each cluster
    # the last col stores the pred by whole data
    #models = []

    # k_fold of inner cross-validation
    fo_inner = 5
    #fo_inner = 10

    # --------------------------- WHOLE -------------------------
    # Predict each cluster using the whole data.
    model_whole = copy.deepcopy(model)
    y_pred_whole = f_k_fo(X, y, model_whole, fo_inner)
    model_whole.fit(X, y) # fit a model using all data rather than only a fold
    print ('  Done evaluation using whole instances')
    print ('  Start to evaluate each cluster ')

    # --------------------------- SELF -------------------------
    # predict the whole instances using each cluster's data, while self-
    # prediction uses k-fold CV.
    [y_pred_multi, models] = f_use_each_clus_forWhole(X, y, clus, \
                                                      y_pred_whole, model, fo_inner)
    print ('  Done evaluation using each cluster')

    models.append(model_whole)
    pred_prob_mat[:, 0:n_clusters] = y_pred_multi
    pred_prob_mat[:, n_clusters] = y_pred_whole

    # make a tmp array that stores y in every column
    tmp = np.matlib.repmat(y.reshape((y.size, 1)), 1, n_clusters+1)
    err_mat = abs(pred_prob_mat - tmp )

    print ('  Done calculating error table and fitting ICE models')
    return [err_mat, models]

"""
#mat = scipy.io.loadmat('/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/'+\
#                       '3_scripts/2017_4_4/data/names.mat')['names']
#mat = io.loadmat('/Users/zg/Desktop/a.mat')['names']
#test
pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/'
X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast cancer
y = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['y']
#X = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['X'] # 11:mesothelioma
#y = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['y']
n_clus = 3
clus = f_fuzzy_rwr_clusters(X, n_clus)
tfs = f_clus_to_tfs(clus, len(X))
y = y.astype(float)
#model = BaggingClassifier(base_estimator = svm.LinearSVC(), \
#model = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \
#model = BaggingClassifier(base_estimator = svm.LinearSVR(), \
model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
                          random_state=None, n_estimators = 100 )
[err_mat, models] = f_err_mat(X, y, clus, model)
"""

#def f_dec_tab_4_bg_svm(X, y, clus):
#    '''
#    Calculate the decision table
#    % This version changed from the cluster-cluster dec_mat to instance-cluster
#    % dec_mat. This solution will avoid the case that if one cluster decision
#    % is wrong, the entire cluster prediction is wrong, which is the reason
#    % of instability. However, we cannot use a systematic evaluation criterion
#    % such as AUC; I will try using the predicted prob at first.
#
#    % This version 3 adds the support for fuzzy clustering - one instance may
#    % belong to more than one cluster.
#    % This updated version also outputs the predicted values of y.
#    % supports more than 3 clusters
#    % the normalization takes place in y_pred_self and y_pred_other, thus we do
#    % not need normalization when predicting y_pred_ICE.
#    % ixsp is another cluster form.
#
#    corresponds to f_dec_tab_4_bg_svm() in Matlab version
#    '''
#    #n_clusters = len(clus)
#    ## dec_mat stores the prediction error.
#    #pred_mat=np.zeros((y.size, n_clusters+1)) #the extra col is for whole pred


def f_err_2_decMat(err_mat, tfs, w=0.4, s=0.5):
    '''
    convert the err table to a decision table.
    w and s act as advantage margins: w for the whole-data model and s for an
    instance's own (self) clusters.
    '''
    dec_mat = np.zeros(( len(err_mat), err_mat[0].size-1 ), dtype=bool)

    # dec_ixs: for each instance, which clusters should be used.
    dec_ixs = []
    inst_clus = f_tfs_2_instClus(tfs)

    adv_whole = w
    adv_self = s

    for i in range(0, len(err_mat)):
        # Matlab code:
        #dec_row = dec_mat(cur_nb_ix, :);
        #dec_row(:, end ) = dec_row(:, end ) - adv_whole;
        #dec_row(:, clus_id) = dec_row(:, clus_id) - adv_self;
        row = np.copy( err_mat[i, :] )
        row[-1] = row[-1] - adv_whole

        inst_i_clus = inst_clus[i]
        if len(inst_i_clus) > 0:
            row[inst_i_clus] = row[inst_i_clus] - adv_self

        ix_good_clus = list( np.where( row < row[-1] ) [0] )
        if len(ix_good_clus) > 0:
            dec_mat[i, ix_good_clus] = True
        dec_ixs.append(ix_good_clus)

    return [dec_mat, dec_ixs]
#[dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs)
def f_ICE_tr_te_all_clus(X_tr, X_te, clus, models, doNorm=True):
    '''
    Use the training data to predict the testing data:
    use the whole data to predict, and
    use each cluster of training data to predict the testing data.
    '''
    y_pred_all = np.zeros(( len(X_te), len(clus) + 1 ))
    # the first col is the prediction using the whole data
    # starting from the second col, the result is by each cluster
    model_whole = models[-1]
    y_pred_all[:, 0] = f_te(X_te, model_whole)
    #y_pred_all[:, 0] = f_tr_te(X_tr, y_tr, X_te, model)
    #print 'whole model good '

    for i in range(0, len(clus)):
        #Xi = X_tr[clus[i].flatten(), :]
        #yi = y_tr[clus[i].flatten() ]
        model_i = models[i]
        #model_a_clust = copy.deepcopy(model)
        try:
            y_pred_te = f_te(X_te, model_i)
        except :
            # single-label clusters store the label instead of a model.
            if model_i == 0:
                y_pred_te = np.zeros(len(X_te))
            elif model_i == 1:
                y_pred_te = np.ones(len(X_te))
            else:
                y_pred_te = np.zeros(len(X_te))
                y_pred_te.fill(np.nan)
        #except NotFittedError as e:
        #    print(repr(e))
        #    y_pred_te = np.zeros(len(X_te))
        #    y_pred_te.fill(np.nan)
        #print 'model '+str(i)+' good '
        #y_pred_te = f_tr_te(Xi, yi, X_te, model)

        if doNorm == True:
            templete = y_pred_all[:, 0]
            target = y_pred_te
            y_pred = f_quantileNorm(templete, target)
        else:
            y_pred = y_pred_te

        y_pred_all[:, i+1] = y_pred
    return y_pred_all
# test
#y_pred_all = f_ICE_tr_te_all_clus(X, X, clus, model)


def f_ICE_fit(X_tr, y_tr, n_clus, model, w=0.4, s=0.5):
    '''
    '''
    # rwr based fuzzy clustering
    clus = f_fuzzy_rwr_clusters(X_tr, n_clus)
    #print clus[0]
    tfs = f_clus_to_tfs(clus, len(X_tr))

    # train models and calculate the error-decision tables
    y_tr = y_tr.astype(float)
    #model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
    #                          random_state=None, n_estimators = 100 )
    [err_mat, models] = f_err_mat(X_tr, y_tr, clus, model)

    # *******************************************************
    [dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs, w, s)
    print ('  Done calculating decision table')

    return [clus, models, dec_ixs]


def f_ICE_fit_2(X_tr, y_tr, n_clus, model, w=0.4, s=0.5):
    '''
    This version uses the err mat to re-cluster.
    '''
    # rwr based fuzzy clustering
    clus = f_fuzzy_rwr_clusters(X_tr, n_clus)
    #print clus[0]
    tfs = f_clus_to_tfs(clus, len(X_tr))

    # train models and calculate the error-decision tables
    y_tr = y_tr.astype(float)
    [err_mat, models] = f_err_mat(X_tr, y_tr, clus, model)

    # ******************** re-clustering ********************
    n_iter = 2
    for i in range(0, n_iter):
        clus = f_fuzzy_rwr_clusters(err_mat, n_clus)
        tfs = f_clus_to_tfs(clus, len(X_tr))
        [err_mat, models] = f_err_mat(X_tr, y_tr, clus, model)
    # *******************************************************

    [dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs, w, s)
    print ('  Done calculating decision table')

    return [clus, models, dec_ixs]

#def_deal_miss_v_1(d):
#    '''
#    deal with missing values by replacing them by mean.
#    '''
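# end-to-end fit sketch added for illustration (function names are from this
# file; the data is synthetic and all sizes are made up for the example).
'''
from sklearn.datasets import make_classification
X_a, y_a = make_classification(n_samples=300, n_features=8, random_state=1)
model_a = svm.SVC(kernel='linear', probability=True)
[clus_a, models_a, dec_ixs_a] = f_ICE_fit(X_a, y_a.astype(float), 10, model_a)
'''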
# % This updated version also outputs the predicted", "= clus_ids_to_use a = list( np.array(a) + 1 ) clus_ids_to_use", "#n_clusters = len(clus) # ## dec_mat stores the prediction error.", "for i in range(0, len(clus)): #Xi = X_tr[clus[i].flatten(), :] #yi", "be 0. % nSteps: how many steps to walk %", "print(ixs_centers[ix]) print(' size of each cluster: ') print(lens[ix]) print(' done", "f_err_mat(X_tr, y_tr, clus, model) [dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs, w,", "n_clus) tfs = f_clus_to_tfs(clus, len(X)) y = y.astype(float) #model =", "k_fold = 10 aucs_ICE = [] aucs_whole = [] #", "#print y_pred_fo else: y_pred_fo = f_tr_te(X_tr, y_tr, X_te, model) y_pred[test_index]", "X: data # k: number of clusters ''' The return", "# ---------- for each testing instance ---------- #n_partials = np.zeros(", "as in Matlab def f_sim_2_aRankNet(sim, k=3): ''' % Convert the", "+ list( y_tr[cur_nb_ix] ) #print ' clus_ids_to_use:' #print clus_ids_to_use #", "len(X_te), len(clus) + 1 )) # the first col is", "0,0,1,0,0,0,0;... % 0,0,0,1,0,0,1;... % 0,0,0,1,0,1,0] % % if nSteps is", "predictions by each cluster # the last col stores the", "#import matplotlib.pyplot as plt from sklearn.ensemble import BaggingClassifier from sklearn", "n_whole #print ' clus_ids_to_use:' #print clus_ids_to_use #print nei_labels y_pred_ICE[j] =", "= models[i] #model_a_clust = copy.deepcopy(model) try: y_pred_te = f_te(X_te, model_i)", "ValueError as e: print(e) print(' skip this cluster') y_pred =", "to. dec_ixs stores the good cluster(s) for each instance, which", "by each cluster # the last col stores the pred", "1] #new = f_quantileNorm(templete, target) #def f_bg_k_fo_3(X, y, k_fold=10): #", "#y_pred_all = f_ICE_tr_te_all_clus(X, X, clus, model) def f_ICE_fit(X_tr, y_tr, n_clus,", "10: if len(yj) > 15 and np.unique(yj).size != 1: #", "+ laziness * p0 l2norm = max(np.sqrt(sum((pnew - p) **", "''' % on the rw_mat matrix, find some nodes as", "'/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/' ##X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast cancer ##y = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['y']", "n_estimators = 100 ) #y_pred = f_k_fo(X, y, model, k_fold=10)", "[] for j in range(0, n_clusters): # for each cluster", "= 10 # # --------------------------- WHOLE ------------------------- # # #", "= [] for i in range(0, k): tmp = np.argwhere(rw_net[:,", "== 1: y_pred_te = np.ones(len(X_te)) else: y_pred_te = np.zeros(len(X_te)) y_pred_te.fill(np.nan)", "yi, X_te, model) if doNorm == True: templete = y_pred_all[:,", "y[test_index] def f_use_each_clus_forWhole(X, y, clus, y_pred_whole, model, fo_inner): ''' %", "= f_eu_dist2(X_tr, X_te) # ---------- for each testing instance ----------", "# random_state=None, n_estimators = 100 ) model = svm.SVC(kernel='linear', probability", "= f_err_2_decMat(err_mat, tfs) def f_ICE_tr_te_all_clus(X_tr, X_te, clus, models, doNorm=True): '''", "in Matlab version # corresponds to f_k_fo() # ''' #", "half the number of # instances. each_clus_sz=len(X)/3 print('RWR-based fuzzy clustering", "# Predict each cluster using the whole data. 
model_whole =", "svm.LinearSVC(), \\ # bagging = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \\ #", "auc_whole = roc_auc_score(y.flatten(), y_preds_whole.flatten() ) print (auc_ICE, auc_whole) aucs_ICE.append(auc_ICE) aucs_whole.append(auc_whole)", "== 1: # y_pred = np.zeros(yj.size) # y_pred.fill(np.unique(yj)[0]) #else: try:", "# corresponds to f_dec_tab_4_bg_svm() in Matlab version # ''' #", "pnew = (1-laziness) * W.dot(p) + laziness * p0 l2norm", "same as in Matlab def f_sim_2_aRankNet(sim, k=3): ''' % Convert", "dtype=bool) for i in range(0, len(clus)): tfs[clus[i], i] = True", "l2norm == pl2norm: unchanged = unchanged +1 if unchanged >", "lost - you won't know #% what is the top", "node. each element in the % column means the probability", "very different range than the templete. templete and target should", "0,0,0,1,0,0,1;... % 0,0,0,1,0,1,0] % % if nSteps is 1000 and", "1.0 CV result. # breast cancer get 0.599 # all", "' + str(fold_i) + ' finished') fold_i = fold_i +", "0 0 % 0 0 0 0 1.0000 0 0", "0 for i in range(1, nSteps+1): if i % 100", "clus, model) # ******************** re-clustering ******************** n_iter = 2 for", "+ 1 # [~, I2] = sort(I); I2 = (np.argsort(I,", "convert the boolean table representation of clustering result to for", "n_clusters = len(clus) y_pred_multi = np.zeros((y.size, n_clusters) ) models =", "[err_mat, models] = f_err_mat(X_tr, y_tr, clus, model) # ******************** re-clustering", "find the farthest node ix = np.argmin(b) ixs.append(ix) return ixs", "1: # y_pred = np.zeros(yj.size) # y_pred.fill(np.unique(yj)[0]) #else: try: y_pred", "i in range(0, n_iter): clus = f_fuzzy_rwr_clusters(err_mat, n_clus) tfs =", "% 0 0.2500 0 0 0 0 0 % 0", "svm.SVC(kernel='linear', probability = True) skf = StratifiedKFold(n_splits=k_fold) skf.get_n_splits(X, y) y_preds_ICE", "w, s) y_pred_ICE = f_ICE_pred(X_tr, y_tr, X_te, clus, dec_ixs, models,N,alpha,beta)", "done calculating the A-rank KNN graph') # % -------- RWR", "= f_tr_te(Xj, yj, X_other, model) #if np.unique(yj).size != 1: model_a_clust.fit(Xj,", "pred. nei_labels = nei_labels + list( y_tr[cur_nb_ix] ) #print '", "auc_whole) aucs_ICE.append(auc_ICE) aucs_whole.append(auc_whole) f.write(str(j) + '\\t' + str(auc_ICE) + '", "---------- clus_ids_to_use = [] nei_labels = [] for cur_nb in", "Each column represents the propability for each node. 
each element", "test #sim = np.array([[0, 0.5566, 0.6448, 0.3289], \\ # [0.5566,", "= aRankNet-diag(diag(aRankNet) ); np.fill_diagonal(aRankNet, False) return aRankNet # test #sim", "# Matlab code: #dec_row = dec_mat(cur_nb_ix, :); #dec_row(:, end )", "aRankNet = max(aRankNet, aRankNet'); aRankNet = np.logical_or(aRankNet, aRankNet.T) # remove", "model_i == 1: y_pred_te = np.ones(len(X_te)) else: y_pred_te = np.zeros(len(X_te))", "= f_quantileNorm(templete, target) else: y_pred = y_pred_te y_pred_all[:, i+1] =", "cancer #y = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['y'] #X = scipy.io.loadmat(pa+'/data/data_all_pickle/37/data.mat')['X'] # 37:congress #y", "3, 4]) #>>> a.flatten('F') #array([1, 3, 2, 4]) ''' a", "each node % has k edges to other nodes (aRank).", "instances') print (' Start to evaluate each cluster ') #", "i in range(0, k): tmp = np.argwhere(rw_net[:, ixs_centers[i] ] ).flatten()", "abs(pred_prob_mat - tmp ) print (' Done calculating error table", "ixs # test #tmp = f_find_centers_rwMat(rw_mat, 10) def getCutoff(rw_mat, avgNeighborsSize):", "= 5 alpha = 1 beta = 1 k_fold =", "'+str(i)+' good ' #y_pred_te = f_tr_te(Xi, yi, X_te, model) if", "49): try: X = scipy.io.loadmat(pa+'data/data_all/'+str(j)+'/data.mat')['X'] # 30:breast cancer y =", "for each instance, which clusters should be used. dec_ixs =", "models used n_partial = len(clus_ids_to_use) # number of whole models", "int( avgNeighborsSize * len1 ) print( all_neibs) ct = a[all_neibs]", "X_te, model) y_preds_whole[test_index] = y_pred_whole print( j) print( 'fold '", "aRank_k_neighbors = np.ceil(np.log10(len(sim)) ) ori_graph = f_sim_2_aRankNet(sim, aRank_k_neighbors) print(' done", "fold pred_prob_mat[:, n_clusters] = y_pred_whole print (' Done evaluation using", "0, -0.0842, -0.0170], \\ # [0.6448, -0.0842, 0, 0.8405], \\", "nSteps+1): if i % 100 == 0: print(' done rwr", "+ 'data/res_ICE_bg_svm_1_iter.txt' #f_res = pa + 'data/res_ICE_bg_svm_py.txt' f_res = pa", "table. ''' dec_mat = np.zeros(( len(err_mat), err_mat[0].size-1 ), dtype=bool) #", "# % such as AUC, I will try using the", "corresponds to f_use_each_clus_forWhole_bg_svm() in Matlab version ''' n_clusters = len(clus)", "calculating error table and fitting ICE models') return [err_mat, models]", "------------------------- def f_err_mat(X, y, clus, model): ''' Calculate the decision", "neighbors:' #print ix_top_neighbors # ---------- find all neighbors' picks ----------", "whole instances using each cluster data, while self # prediction", "\"\"\" Created on Mon Mar 5 05:47:03 2018 @author: zg", "f_fuzzy_rwr_clusters(X, n_clus) tfs = f_clus_to_tfs(clus, len(X)) y = y.astype(float) #model", "X_other = X[ix_other , :] #y_other = y[ix_other ] #", "True, False], # [ True, False, False, False], # [", "#model = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \\ #model = BaggingClassifier(base_estimator =", "instance may # % belongs to more than one cluster.", "cluster. each instance in dec_ixs does not contain the whole", "unchanged > 10: break else: unchanged = 0 pl2norm =", "the random walk algorithm. % A is the input net", "ix_other = set(range(0, y.size)) - set(clus[j].flatten()) ix_other = list(ix_other) #print", "dec_ixs, models,N=5,alpha=1,beta=1): ''' clus and inst_clus contains the same information", "mat. 
But for each #% instance, the rank of clusters", "version # ''' # #n_clusters = len(clus) # ## dec_mat", "BaggingClassifier(BaseBagging, \\ # random_state=None, n_estimators = 100 ) # bagging.fit(X_tr,", "' ix of top neighbors:' #print ix_top_neighbors # ---------- find", "= y[clus[j].flatten() ] model_a_clust = copy.deepcopy(model) print(' Cluster '+str(j)+' started...')", "''' % using each cluster data to predict the whole", "# # ## k_fold of inner cross-validation # #fo_inner =", "# test #inst_clus = f_tfs_2_instClus(tfs) #def f_bg_svm_tr_te(X_tr, y_tr, X_te, y_te):", "sklearn from scipy.sparse import spdiags from scipy.spatial import distance #import", "# quantileNorm templete = y_pred_whole[clus[j].flatten()] target = y_pred y_pred =", "n_clus, model, w=0.4, s=0.5): ''' This version use the err", "y_pred def f_k_fo(X, y, model, k_fold=10): ''' corresponds to f_weka_bg_svm_arff_k_fo_3_parfor()", "try: X = scipy.io.loadmat(pa+'data/data_all/'+str(j)+'/data.mat')['X'] # 30:breast cancer y = scipy.io.loadmat(pa+'data/data_all/'+str(j)+'/data.mat')['y']", "#[y_pred_multi, models] = f_use_each_clus_forWhole(X, y, clus, y_pred_whole, model) #def f_dec_tab_4_bg_svm(X,", "fuzzy clustering - one instance may # % belongs to", ":] yj = y[clus[j].flatten() ] model_a_clust = copy.deepcopy(model) print(' Cluster", "% normalization take place in y_pred_self and y_pred_other, thus do", "corresponds to f_weka_bg_svm_arff_k_fo_3_parfor() in Matlab version ''' y = y.flatten()", "% clustering. If we just random pickup some nodes as", "0 0.2500 0 0 0 0.5000 0.5000 % 0 0", "y_tr, model): model_inner = copy.deepcopy(model) model_inner.fit(X_tr, y_tr) return model_inner def", "target) #def f_bg_k_fo_3(X, y, k_fold=10): # ''' # corresponds to", "y_tr, n_clus, model, w=0.4, s=0.5): ''' This version use the", "for i in range(0, len(X)): for j in range(i+1, len(X)):", "#[clus, models, dec_ixs] = f_ICE_fit_2(X_tr, y_tr, n_clus, model, w, s)", "is by each cluster for i in range(0, len(clus)): #Xi", "target) else: y_pred = y_pred_te y_pred_all[:, i+1] = y_pred return", "+ str(fold_i) + ' finished') fold_i = fold_i + 1", "y) # # for train_index, test_index in skf.split(X, y): #", "i in range(0, len(clus)): #Xi = X_tr[clus[i].flatten(), :] #yi =", "+ 1).T # for every column, just keep the top", "double 1x161 double 1x62 double #% #% tfs e.g: #%", "y_pred = f_quantileNorm(templete, target) else: y_pred = y_pred_te y_pred_all[:, i+1]", "100 ) model = svm.SVC(kernel='linear', probability = True) skf =", "col # % most connected node. ix = np.argmax(a) ixs.append(ix)", "# instead of using whole to pred. nei_labels = nei_labels", "model_inner.fit(X_tr, y_tr) y_pred = model_inner.predict_proba(X_te) y_pred = y_pred[:, 1].flatten() #auc", "''' # #n_clusters = len(clus) # ## dec_mat stores the", "# remove probability of returning start node np.fill_diagonal(rw, 0) rw_mat", "the Euclidean distance matrix') # --------------------------------------------------------------- aRank_k_neighbors = np.ceil(np.log10(len(sim)) )", "0.3333 0 0 0 % 0 0 0 0 1.0000", "X y_tr = y X_te = X y_te = y", "0.009, 0.356, 0.004, 0.004;... 
% 0.017, 0.035, 0.014, 0.154, 0.009,", "= len(c1[i]) return lens def f_eu_dist(X): ''' calculate the euclidean", "0] #target = X[:, 1] #new = f_quantileNorm(templete, target) #def", "# test #y_pred_all = f_ICE_tr_te_all_clus(X, X, clus, model) def f_ICE_fit(X_tr,", "[] for cur_nb in range(0, N): # for each neighbour", "make the ix fit the col id in y_pred_all a", "y_preds_ICE.flatten() ) auc_whole = roc_auc_score(y.flatten(), y_preds_whole.flatten() ) print (auc_ICE, auc_whole)", "False) return aRankNet # test #sim = np.array([[0, 0.5566, 0.6448,", "''' Convert the err table to decision table. ''' dec_mat", "is not easy to find for a instance, which are", "== None: p0 = np.eye(n) ''' % In the example", "sim[i][j] = tmp sim[j][i] = tmp sim = -sim print('", "#array([[False, True, True, False], # [ True, False, False, False],", "1/2/3 cluster it belongs to. #% #% clus e.g: #%", "= models[-1] y_pred_all[:, 0] = f_te(X_te, model_whole) #y_pred_all[:, 0] =", "for j in range(0, len(X_te) ): # for each testing", "#sim = f_eu_dist2(X_tr, X_te) def f_fuzzy_rwr_clusters(X, k=100, each_clus_sz=None): # X:", "using 10-fold CV. corresponds to f_use_each_clus_forWhole_bg_svm() in Matlab version '''", "instances ids for each cluster, while inst_clus stores that for", "= [] for cur_nb in range(0, N): # for each", "np.zeros( y.size ) y_preds_whole = np.zeros( y.size ) fold_i =", "0.324, 0.028, 0.028;... % 0.048, 0.099, 0.039, 0.431, 0.027, 0.232,", "Created on Mon Mar 5 05:47:03 2018 @author: zg \"\"\"", "tree.DecisionTreeClassifier(), \\ # random_state=None, n_estimators = 100 ) # bagging.fit(X_tr,", "ct = a[all_neibs] return ct #test #>>> a = np.array([[1,2],", "scipy.spatial import distance #import matplotlib.pyplot as plt from sklearn.ensemble import", "if each_clus_sz == None: # on average, how many clusters", "walk algorithm. % A is the input net matrix, with", "[2,1,0,0,1,0,0],\\ [0,1,0,0,0,1,1],\\ [0,0,1,0,0,0,0],\\ [0,0,0,1,0,0,1],\\ [0,0,0,1,0,1,0]]) nSteps = 1000 lazi =", "[ True, True, True, False]]) def f_find_centers_rwMat(rw_mat, k): ''' %", "X_te = X y_te = y [y_pred, auc] = f_bg_svm_tr_te(X_tr,", "random_state=None, shuffle=True) skf.get_n_splits(X, y) for train_index, test_index in skf.split(X, y):", "table') return [clus, models, dec_ixs] def f_ICE_pred(X_tr, y_tr, X_te, clus,", "example, for the above matrix, nSteps = % 100, 1000", "array. lens = np.zeros(len(c1)) for i in range(0, len(c1)): lens[i]", "% 100 == 0: print(' done rwr ' + str(i-1)", "# corresponds to f_weka_bg_svm_tr_te() in Matlab version # ''' #", "range(0, k): tmp = np.argwhere(rw_net[:, ixs_centers[i] ] ).flatten() clus.append(tmp) #", "Calculate the decision table # % This version changed from", "np.zeros((len(X), len(X) ) ) for i in range(0, len(X)): for", "= f_find_centers_rwMat(rw_mat, 10) def getCutoff(rw_mat, avgNeighborsSize): tmp = rw_mat.flatten('F') a", "TEST: \", test_index) X_tr, X_te = X[train_index], X[test_index] #y_tr, y_te", "#% 1 0 0 0 0 #% 1 1 1", "probability of returning start node np.fill_diagonal(rw, 0) rw_mat = rw", "model_whole.fit(X, y) # fit a model using all data rather", "# bagging.fit(X_tr, y_tr) # # y_pred = bagging.predict_proba(X_te) # y_pred", "******************************************************* [dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs, w, s) print ('", "np.nanmean(y_pred_all[j, clus_ids_to_use]) print ('Done predicting testing instances.') return y_pred_ICE #", "% 2,1,0,0,1,0,0;... % 0,1,0,0,0,1,1;... % 0,0,1,0,0,0,0;... % 0,0,0,1,0,0,1;... 
% 0,0,0,1,0,1,0]", "target should be 1d n by 1 array. f_my_quantileNorm() '''", "copy the normed prediction to the whole data. y_pred_multi[clus[j].flatten(), j]", "pred_prob_mat[:, 0:n_clusters] = y_pred_multi # make a tmp array a", "predicted prob at first. # # % This version 3", "stores the instance indices for each cluster. However, this data", "fo_inner) # quantileNorm templete = y_pred_whole[clus[j].flatten()] target = y_pred y_pred", "pred_prob_mat[:, n_clusters] = y_pred_whole print (' Done evaluation using whole", "0 pl2norm = l2norm return p # test RWR() '''", "return [y_pred, auc] # test ''' X_tr = X y_tr", "X[train_index], X[test_index] y_tr, y_te = y[train_index], y[test_index] [clus, models, dec_ixs]", "training data to predict the testing data. ''' y_pred_all =", "soft % clustering. If we just random pickup some nodes", "even a remote cluster. each instance in dec_ixs does not", "y_pred print(' c-'+str(j)+' done predicting remote instances') except ValueError as", "try using the predicted prob at first. # # %", "import tree import copy import numpy.matlib from sklearn.exceptions import NotFittedError", "0.2500 0 0 0 0.5000 0.5000 % 0 0 0.2500", "tmp = np.argwhere(rw_net[:, ixs_centers[i] ] ).flatten() clus.append(tmp) # --------------------------------------------------------------- #", "Use each cluster of training data to predict the testing", "y_preds_whole = np.zeros( y.size ) fold_i = 1 for train_index,", "f_my_quantileNorm() ''' ix_target = np.argsort(target, kind='mergesort') ix_templete = np.argsort(templete, kind='mergesort')", "1].flatten() # quantileNorm templete = y_pred_whole[ix_other] target = y_pred y_pred", "auc = roc_auc_score(y_te.flatten(), y_pred) # # return [y_pred, auc] def", "utf-8 -*- \"\"\" Created on Mon Mar 5 05:47:03 2018", "prediction to the whole data. y_pred_multi[clus[j].flatten(), j] = y_pred print('", "than only a fold pred_prob_mat[:, n_clusters] = y_pred_whole print ('", "in Matlab version ''' n_clusters = len(clus) # err_mat stores", "len(A) if p0 == None: p0 = np.eye(n) ''' %", "of each cluster: ') print(lens[ix]) print(' done RWR clustering') return", "for each testing instance ---------- #n_partials = np.zeros( len(X_te) )", "cluster data to predict the whole instances, while self %", "X_te = X[train_index], X[test_index] #y_tr, y_te = y[train_index], y[test_index] y_tr", "instance in dec_ixs does not contain the whole set of", "in skf.split(X, y): # print(\"TRAIN:\", train_index, \"TEST:\", test_index) X_tr, X_te", "y_pred_ICE. # % ixsp is another cluster form. # #", "[] for i in range(0, k): tmp = np.argwhere(rw_net[:, ixs_centers[i]", "maybe will be used later as KNN pred # instead", "np.zeros(( len(X), len(X) )) for i in range(0, len(X)): for", "0 0 tfs = np.zeros((n_inst, len(clus)), dtype=bool) for i in", "column represents the propability for each node. 
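# ---------------------------------------------------------------------------
# Added sketch (not from the original script): a quick numerical check of
# RWR() on the 7-node toy graph from its docstring. With a column-stochastic
# walk matrix and the identity as p0, every column of the returned matrix
# should remain a probability distribution, i.e. sum to 1.
# ---------------------------------------------------------------------------
def _check_rwr_columns_sum_to_one():
    A = np.array([[0,2,2,0,0,0,0],
                  [2,0,1,1,0,0,0],
                  [2,1,0,0,1,0,0],
                  [0,1,0,0,0,1,1],
                  [0,0,1,0,0,0,0],
                  [0,0,0,1,0,0,1],
                  [0,0,0,1,0,1,0]], dtype=float)
    p = RWR(A, 1000, 0.3)
    # each column of p is the landing distribution of a walk that started
    # at the corresponding node, so it must sum to 1.
    assert np.allclose(p.sum(axis=0), 1.0)
    return p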
def f_tfs_2_instClus(tfs):
    '''
    convert the boolean table representation of clustering result to for
    each instance, which cluster(s) it belongs to.
    '''
    inst_clus = []
    for i in range(0, len(tfs)):
        row = list( np.where(tfs[i, :] ) [0] )
        inst_clus.append(row)
    return inst_clus

# test
#inst_clus = f_tfs_2_instClus(tfs)


#def f_bg_svm_tr_te(X_tr, y_tr, X_te, y_te):
#    '''
#    corresponds to f_weka_bg_svm_tr_te() in Matlab version
#    '''
#    #bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \
#    #bagging = BaggingClassifier(BaseBagging, \
#    #            random_state=None, n_estimators = 100 )
#    bagging.fit(X_tr, y_tr)
#
#    y_pred = bagging.predict_proba(X_te)
#    y_pred = y_pred[:, 1].flatten()
#
#    auc = roc_auc_score(y_te.flatten(), y_pred)
#    return [y_pred, auc]

# test
'''
X_tr = X
y_tr = y
X_te = X
y_te = y
[y_pred, auc] = f_bg_svm_tr_te(X_tr, y_tr, X_te, y_te)
'''


def f_tr(X_tr, y_tr, model):
    model_inner = copy.deepcopy(model)
    model_inner.fit(X_tr, y_tr)
    return model_inner


def f_te(X_te, model):
    y_pred = model.predict_proba(X_te)
    y_pred = y_pred[:, 1].flatten()
    return y_pred


def f_tr_te(X_tr, y_tr, X_te, model):
    '''
    corresponds to f_weka_bg_svm_tr_te() in Matlab version
    '''
    model_inner = copy.deepcopy(model)
    model_inner.fit(X_tr, y_tr)
    y_pred = model_inner.predict_proba(X_te)
    y_pred = y_pred[:, 1].flatten()
    #auc = roc_auc_score(y_te.flatten(), y_pred)
    return y_pred


def f_k_fo(X, y, model, k_fold=10):
    '''
    corresponds to f_weka_bg_svm_arff_k_fo_3_parfor() in Matlab version
    '''
    y = y.flatten()
    y_pred = np.zeros(y.size)

    skf = StratifiedKFold(n_splits=k_fold, random_state=None, shuffle=True)
    skf.get_n_splits(X, y)

    for train_index, test_index in skf.split(X, y):
        #print("TRAIN: ", train_index, " TEST: ", test_index)
        X_tr, X_te = X[train_index], X[test_index]
        #y_tr, y_te = y[train_index], y[test_index]
        y_tr = y[train_index]

        if np.unique(y_tr).size == 1:
            y_pred_fo = np.zeros( len(test_index) )
            y_pred_fo.fill(np.unique(y_tr)[0] )
        else:
            y_pred_fo = f_tr_te(X_tr, y_tr, X_te, model)

        y_pred[test_index] = y_pred_fo

    #auc = roc_auc_score(y.flatten(), y_pred)
    return y_pred

# test
#pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/'
#X = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['X'] # 11:mesothelioma
#y = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['y']
#model = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \
#            random_state=None, n_estimators = 100 )
#y_pred = f_k_fo(X, y, model, k_fold=10)
#print roc_auc_score(y.flatten(), y_pred)
# the easy dataset gets 1.0 CV result.
# breast cancer get 0.599


# this is tested, correct.
def f_quantileNorm(templete, target):
    '''
    Templete is the standard, change the target to the values in the
    templete. Target may have a very different range than the templete.
    templete and target should be 1d n by 1 array.
    f_my_quantileNorm()
    '''
    ix_target = np.argsort(target, kind='mergesort')
    ix_templete = np.argsort(templete, kind='mergesort')
    target[ix_target] = templete[ix_templete]
    new = target
    return new

# test
#templete = X[:, 0]
#target = X[:, 1]
#new = f_quantileNorm(templete, target)


#def f_bg_k_fo_3(X, y, k_fold=10):
#    '''
#    corresponds to f_weka_bgSvm_arff_k_fo_3_parfor() in Matlab version
#    corresponds to f_k_fo()
#    '''
#    y_pred = np.zeros((y.size, 1))
#
#    skf = StratifiedKFold(n_splits=k_fold)
#    skf.get_n_splits(X, y)
#
#    for train_index, test_index in skf.split(X, y):
#        #print("TRAIN:", train_index, "TEST:", test_index)
#        X_tr, X_te = X[train_index], X[test_index]
#        y_tr, y_te = y[train_index], y[test_index]


def f_use_each_clus_forWhole(X, y, clus, y_pred_whole, model, fo_inner):
    '''
    % using each cluster data to predict the whole instances, while self
    % prediction using 10-fold CV.
    corresponds to f_use_each_clus_forWhole_bg_svm() in Matlab version
    '''
    n_clusters = len(clus)
    y_pred_multi = np.zeros((y.size, n_clusters) )
    models = []

    for j in range(0, n_clusters):
        # for each cluster
        Xj = X[clus[j].flatten(), :]
        yj = y[clus[j].flatten() ]
        model_a_clust = copy.deepcopy(model)
        print(' Cluster '+str(j)+' started...')

        if yj.size > 10:
            if len(yj) > 15 and np.unique(yj).size != 1:
                #if np.unique(yj).size == 1:
                #    y_pred = np.zeros(yj.size)
                #    y_pred.fill(np.unique(yj)[0])
                #else:
                try:
                    # ------------------ for self -----------------
                    y_pred = f_k_fo(Xj, yj, model, fo_inner)

                    # quantileNorm
                    templete = y_pred_whole[clus[j].flatten()]
                    target = y_pred
                    y_pred = f_quantileNorm(templete, target)

                    # copy the normed prediction to the whole data.
                    y_pred_multi[clus[j].flatten(), j] = y_pred
                    print(' c-'+str(j)+' done predicting local instances')

                    # ------------------ for other -----------------
                    ix_other = set(range(0, y.size)) - set(clus[j].flatten())
                    ix_other = list(ix_other)
                    X_other = X[ix_other , :]
                    #y_other = y[ix_other ]

                    # predict
                    #y_pred = f_tr_te(Xj, yj, X_other, model)
                    model_a_clust.fit(Xj, yj)
                    y_pred = model_a_clust.predict_proba(X_other)
                    y_pred = y_pred[:, 1].flatten()

                    # quantileNorm
                    templete = y_pred_whole[ix_other]
                    target = y_pred
                    y_pred = f_quantileNorm(templete, target)
                    #else:
                    #    y_pred = np.zeros(X_other.size)
                    #    y_pred.fill(np.unique(yj)[0])

                    # copy to the whole array
                    y_pred_multi[ix_other, j] = y_pred
                    print(' c-'+str(j)+' done predicting remote instances')
                except ValueError as e:
                    print(e)
                    print(' skip this cluster')
                    y_pred = np.zeros(y.size)
                    y_pred.fill(np.nan)
                    y_pred_multi[:, j] = y_pred
            else:
                if np.unique(yj).size == 1:
                    print (' warning, #unique labels == 1')
                    model_a_clust = np.unique(yj)[0]
                else:
                    print ('  #insts in cluster, <= 15, skip...')
                y_pred = np.zeros(y.size)
                y_pred.fill(np.nan)
                y_pred_multi[:, j] = y_pred
        else:
            y_pred = np.zeros(y.size)
            y_pred.fill(np.nan)
            y_pred_multi[:, j] = y_pred

        models.append(model_a_clust)

    return [y_pred_multi, models]

#[y_pred_multi, models] = f_use_each_clus_forWhole(X, y, clus, y_pred_whole, model)


#def f_dec_tab_4_bg_svm(X, y, clus):
#    '''
#    Calculate the decision table
#    % This version changed from the cluster-cluster dec_mat to
#    % instance-cluster dec_mat.
#    % This updated version also outputs the predicted values of y.
#    % support more than 3 clusters
#    % normalization take place in y_pred_self and y_pred_other, thus do
#    % not normalize here again.
#    % for evaluation criteria such as AUC, I will try using the
#    % predicted prob at first.
#    % This version 3 adds the support for fuzzy clustering - one instance
#    % may belongs to more than one cluster.
#    % the output is the same as y_pred_ICE. % ixsp is another cluster form.
#    corresponds to f_dec_tab_4_bg_svm() in Matlab version
#    '''
#    #n_clusters = len(clus)
#    ## dec_mat stores the prediction error.
#    #pred_mat=np.zeros((y.size, n_clusters+1)) #the extra col is for whole pred
#
#    ## k_fold of inner cross-validation
#    #fo_inner = 10
#    # --------------------------- WHOLE -------------------------
#
#    # --------------------------- SELF -------------------------


def f_err_mat(X, y, clus, model):
    '''
    Calculate the decision table
    corresponds to f_dec_tab_4_bg_svm() in Matlab version
    '''
    n_clusters = len(clus)
    # err_mat stores the prediction error.
    pred_prob_mat=np.zeros((y.size, n_clusters+1)) #the extra col is for whole

    ## k_fold of inner cross-validation
    fo_inner = 10

    # --------------------------- WHOLE -------------------------
    y_pred_whole = f_k_fo(X, y, model, fo_inner)
    # Predict each cluster using the whole data.
    model_whole = copy.deepcopy(model)
    model_whole.fit(X, y) # fit a model using all data rather than only a fold
    pred_prob_mat[:, n_clusters] = y_pred_whole
    print (' Done evaluation using whole instances')
    print (' Start to evaluate each cluster ')

    # --------------------------- SELF -------------------------
    [y_pred_multi, models] = f_use_each_clus_forWhole(X, y, clus, \
        y_pred_whole, model, fo_inner)
    models.append(model_whole)
    pred_prob_mat[:, 0:n_clusters] = y_pred_multi

    # make a tmp array a stores y
    a = np.matlib.repmat(y.reshape(y.size, 1), 1, n_clusters+1)
    tmp = a
    err_mat = abs(pred_prob_mat - tmp )
    print (' Done calculating error table and fitting ICE models')
    return [err_mat, models]


def f_err_2_decMat(err_mat, tfs, adv_whole=0.4, adv_self=0.5):
    '''
    Convert the err table to decision table.
    '''
    dec_mat = np.zeros(( len(err_mat), err_mat[0].size-1 ), dtype=bool)

    # dec_ixs: for each instance, which clusters should be used.
    dec_ixs = []
    inst_clus = f_tfs_2_instClus(tfs)

    for i in range(0, len(err_mat)):
        # Matlab code:
        #dec_row = dec_mat(cur_nb_ix, :);
        #dec_row(:, end ) = dec_row(:, end ) - adv_whole;
        #dec_row(:, clus_id) = dec_row(:, clus_id) - adv_self;
        row = np.copy(err_mat[i, :] )
        row[-1] = row[-1] - adv_whole
        inst_i_clus = inst_clus[i]
        if len(inst_i_clus) > 0:
            row[inst_i_clus] = row[inst_i_clus] - adv_self

        ix_good_clus = list( np.where( row[0:-1] < row[-1] ) [0] )
        if len(ix_good_clus) > 0:
            dec_mat[i, ix_good_clus] = True
            dec_ixs.append(ix_good_clus)
        else:
            dec_ixs.append([])

    return [dec_mat, dec_ixs]

#[dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs)

# test
#pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/'
##X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast cancer
##y = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['y']
#X = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['X'] # 11:mesothelioma
#y = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['y']
#n_clus = 3
#clus = f_fuzzy_rwr_clusters(X, n_clus)
#tfs = f_clus_to_tfs(clus, len(X))
#y = y.astype(float)
#model = svm.SVC(kernel='linear', probability = True)
#[err_mat, models] = f_err_mat(X, y, clus, model)
#[dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs)


def f_ICE_tr_te_all_clus(X_tr, X_te, clus, models, doNorm=True):
    '''
    Use the training data to predict the testing data.
    Use each cluster of training data to predict the testing data.
    '''
    y_pred_all = np.zeros(( len(X_te), len(clus) + 1 ))

    # the first col is the prediction using the whole data
    model_whole = models[-1]
    y_pred_all[:, 0] = f_te(X_te, model_whole)

    # start from the second col, the result is by each cluster
    for i in range(0, len(clus)):
        #Xi = X_tr[clus[i].flatten(), :]
        #yi = y_tr[clus[i].flatten()]
        #y_pred_te = f_tr_te(Xi, yi, X_te, model)
        model_i = models[i]
        try:
            y_pred_te = f_te(X_te, model_i)
        except :
            # the model of this cluster may be a constant label, or unfitted.
            if model_i == 0:
                y_pred_te = np.zeros(len(X_te))
            elif model_i == 1:
                y_pred_te = np.ones(len(X_te))
            else:
                y_pred_te = np.zeros(len(X_te))
                y_pred_te.fill(np.nan)

        if doNorm == True:
            templete = y_pred_all[:, 0]
            target = y_pred_te
            y_pred = f_quantileNorm(templete, target)
        else:
            y_pred = y_pred_te

        y_pred_all[:, i+1] = y_pred

    return y_pred_all

# test
#y_pred_all = f_ICE_tr_te_all_clus(X, X, clus, model)


def f_ICE_fit(X_tr, y_tr, n_clus, model, w=0.4, s=0.5):
    # rwr based fuzzy clustering
    clus = f_fuzzy_rwr_clusters(X_tr, n_clus)
    #print clus[0]
    tfs = f_clus_to_tfs(clus, len(X_tr))

    # train models and calculate the decision table
    y_tr = y_tr.astype(float)
    #model = svm.SVC(kernel='linear', probability = True)
    #model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
    #            random_state=None, n_estimators = 100 )
    [err_mat, models] = f_err_mat(X_tr, y_tr, clus, model)

    [dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs, w, s)
    print (' Done calculating the decision table')

    return [clus, models, dec_ixs]


'''
deal with missing values by replacing them by mean.
'''
def f_ICE_fit_2(X_tr, y_tr, n_clus, model, w=0.4, s=0.5):
    '''
    This version use the err mat to do the re-clustering.
    '''
    # rwr based fuzzy clustering
    clus = f_fuzzy_rwr_clusters(X_tr, n_clus)
    tfs = f_clus_to_tfs(clus, len(X_tr))

    y_tr = y_tr.astype(float)
    [err_mat, models] = f_err_mat(X_tr, y_tr, clus, model)

    # ******************** re-clustering ********************
    n_iter = 2
    for i in range(0, n_iter):
        clus = f_fuzzy_rwr_clusters(err_mat, n_clus)
        tfs = f_clus_to_tfs(clus, len(X_tr))
        [err_mat, models] = f_err_mat(X_tr, y_tr, clus, model)
    # *******************************************************

    [dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs, w, s)
    print (' Done calculating the decision table')

    return [clus, models, dec_ixs]


def f_ICE_pred(X_tr, y_tr, X_te, clus, dec_ixs, models,N=5,alpha=1,beta=1):
    '''
    clus and inst_clus contains the same information that clus stores the
    instances ids for each cluster, while inst_clus stores that for each
    instance, which cluster(s) it belongs to.
    dec_ixs stores the good cluster(s) for each instance, which may include
    even a remote cluster. each instance in dec_ixs does not contain the
    whole set of clusters.
    '''
    # predictions by the whole model and by each cluster
    y_pred_all = f_ICE_tr_te_all_clus(X_tr, X_te, clus, models)

    y_pred_ICE = np.zeros( len(X_te) )

    neighbour_mat = f_eu_dist2(X_tr, X_te)

    # ---------- for each testing instance ----------
    #n_partials = np.zeros( len(X_te) )
    #n_wholes = np.zeros( len(X_te) )
    for j in range(0, len(X_te) ):
        # find the N nearest training neighbors for each test instance.
        # similarities are negative distances, so the largest values are
        # the nearest neighbors.
        neighbour_col = neighbour_mat[:, j].flatten()
        ix = np.argsort(neighbour_col, kind='mergesort')[::-1]
        ix_top_neighbors = ix[0:N]
        #print ' testing inst ' + str(j)
        #print ' ix of top neighbors:'
        #print ix_top_neighbors

        # ---------- find all neighbors' picks ----------
        clus_ids_to_use = []
        nei_labels = []
        for cur_nb in range(0, N):
            # for each neighbour
            cur_nb_ix = ix_top_neighbors[cur_nb]
            clus_id_to_use = list( dec_ixs[cur_nb_ix] )
            clus_ids_to_use = clus_ids_to_use + clus_id_to_use
            # also find neighbor's label. maybe will be used later as KNN
            # pred, instead of using whole to pred.
            nei_labels = nei_labels + list( y_tr[cur_nb_ix] )
        #print ' clus_ids_to_use:'
        #print clus_ids_to_use

        # make the ix fit the col id in y_pred_all
        a = clus_ids_to_use
        a = list( np.array(a) + 1 )
        clus_ids_to_use = a

        # number of partial models used
        n_partial = len(clus_ids_to_use)
        # number of whole models used, based on parameters alpha, beta and N.
        n_whole = int( round( alpha*n_partial + beta*N ) )
        clus_ids_to_use = clus_ids_to_use + [0] * n_whole
        #print clus_ids_to_use
        #print nei_labels

        y_pred_ICE[j] = np.nanmean(y_pred_all[j, clus_ids_to_use])

    print ('Done predicting testing instances.')
    return y_pred_ICE


# test
# pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/'
# pa = '/Users/zg/Dropbox/bio/ICE_2018/'
# pa = './'
pa = 'C:/Users/zg/Dropbox/bio/ICE_2018/'

n_clus = 100
w = 0.4
s = 0.5
N = 5
alpha = 1
beta = 1
k_fold = 10
aucs_ICE = []
aucs_whole = []

#f_res = pa + 'data/res_ICE_bg_svm_1_iter.txt'
#f_res = pa + 'data/res_ICE_bg_svm_py.txt'
f_res = pa + 'data/res_ICE_SVM_py.txt'
f = open(f_res, 'w')

#for j in range(1, 50):
for j in range(1, 49):
    try:
        X = scipy.io.loadmat(pa+'data/data_all/'+str(j)+'/data.mat')['X']
        y = scipy.io.loadmat(pa+'data/data_all/'+str(j)+'/data.mat')['y']
        #X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast cancer
        #y = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['y']
        #X = scipy.io.loadmat(pa+'/data/data_all_pickle/37/data.mat')['X'] # 37:congress
        #y = scipy.io.loadmat(pa+'/data/data_all_pickle/37/data.mat')['y']

        #model = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \
        #model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
        #            random_state=None, n_estimators = 100 )
        model = svm.SVC(kernel='linear', probability = True)

        skf = StratifiedKFold(n_splits=k_fold)
        skf.get_n_splits(X, y)

        y_preds_ICE = np.zeros( y.size )
        y_preds_whole = np.zeros( y.size )
        fold_i = 1

        for train_index, test_index in skf.split(X, y):
            # print("TRAIN:", train_index, "TEST:", test_index)
            X_tr, X_te = X[train_index], X[test_index]
            y_tr, y_te = y[train_index], y[test_index]

            [clus, models, dec_ixs] = f_ICE_fit(X_tr, y_tr, n_clus, model, w, s)
            #[clus, models, dec_ixs] = f_ICE_fit_2(X_tr, y_tr, n_clus, model, w, s)

            y_pred_ICE = f_ICE_pred(X_tr, y_tr, X_te, clus, dec_ixs, models,N,alpha,beta)
            y_preds_ICE[test_index] = y_pred_ICE

            y_pred_whole = f_tr_te(X_tr, y_tr, X_te, model)
            y_preds_whole[test_index] = y_pred_whole

            print( j)
            print( 'fold ' + str(fold_i) + ' finished')
            fold_i = fold_i + 1

        auc_ICE = roc_auc_score(y.flatten(), y_preds_ICE.flatten() )
        auc_whole = roc_auc_score(y.flatten(), y_preds_whole.flatten() )
        print (auc_ICE, auc_whole)
        aucs_ICE.append(auc_ICE)
        aucs_whole.append(auc_whole)
        f.write(str(j) + '\t' + str(auc_ICE) + ' ' + str(auc_whole) + '\n')
    except Exception as e:
        print(e)
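# ---------------------------------------------------------------------------
# Added usage sketch (not part of the original script): drives the ICE
# pipeline end-to-end on a small synthetic two-class problem. The synthetic
# dataset, the tiny n_clus and the 100/20 split are assumptions made purely
# for illustration; call the function by hand to try it.
# ---------------------------------------------------------------------------
def _demo_ICE():
    from sklearn.datasets import make_classification  # assumed available

    X_d, y_d = make_classification(n_samples=120, n_features=10,
                                   random_state=0)
    # keep y as a float column vector, matching the loadmat()-style arrays
    # the rest of the script expects.
    y_d = y_d.reshape(-1, 1).astype(float)

    demo_model = svm.SVC(kernel='linear', probability = True)
    X_tr_d, X_te_d = X_d[:100], X_d[100:]
    y_tr_d, y_te_d = y_d[:100], y_d[100:]

    [clus_d, models_d, dec_ixs_d] = f_ICE_fit(X_tr_d, y_tr_d, 5, demo_model)
    y_hat = f_ICE_pred(X_tr_d, y_tr_d, X_te_d, clus_d, dec_ixs_d, models_d)
    print('demo AUC:', roc_auc_score(y_te_d.flatten(), y_hat))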
[ ") parser.add_argument( '--synth_tiles', help='If using an ROI, synthetic tile defintion", "( SELECT pkey FROM wire_in_tile WHERE name LIKE \"CLK_BUFG_REBUF_R_CK_GCLK%_BOT\" OR", "types. \"\"\" cur = conn.cursor() cur.execute( \"\"\" SELECT pkey, tile_type_pkey,", "FROM constant_sources') (track_pkey, ) = cur.fetchone() elif pin['port_type'] == 'GND':", "the inverter on CE input which is connected to VCC.", "for h in range(hilbert_curve.max_h + 1): coord = tuple(hilbert_curve.coordinates_from_distance(h)) if", "they are found on the TOP or BOTTOM of an", "(hclk_cmt_x, hclk_cmt_y) ) break def find_hclk_cmt_hclk_feature(hclk_tile, lr, hclk_number): if (hclk_tile,", "SELECT phy_tile_pkey FROM wire WHERE pkey = ? )\"\"\", (connection_box_wire_pkey,", "The IOI_SING are found on top or bottom of the", "coordinates. \"\"\" cur.execute( \"\"\" SELECT name FROM phy_tile WHERE name", "(track_pkey, ) ) result = cur2.fetchone() if result: canonical_loc =", "else: pip_name = None switch_id = get_switch_name( conn, graph, switch_name_map,", "synth_tiles input. All arches should have these synthetic tiles, search", "list( tracks_model.get_tracks_for_wire_at_coord( tuple(synth_tile['loc']) ).values() ) assert len(option) > 0, (pin,", "= node.id update_connection_box( conn, graph, left_graph_node_pkey, node_idx, connection_box_map ) elif", "conn.cursor() @functools.lru_cache(maxsize=None) def get_pip_wire_names(pip_pkey): cur.execute( \"\"\"SELECT src_wire_in_tile_pkey, dest_wire_in_tile_pkey FROM pip_in_tile", "in cur.execute(\"SELECT pkey FROM track WHERE alive = 1;\"): alive_tracks.add(track_pkey)", ") ) for tile, wire_name in cur: REBUF_SOURCES[(tile, wire_name)] =", "cur.fetchone() assert track_pkey is not None, ( tile_name, pin['wire'], wire_pkey", "not None: cur.execute( \"\"\" SELECT grid_x, grid_y FROM phy_tile WHERE", "populate_bufg_rebuf_map(conn): global REBUF_NODES REBUF_NODES = {} global REBUF_SOURCES REBUF_SOURCES =", "enumerate(rebuf_and_hrow_tiles): if tile_name is not None and tile_name.startswith(\"CLK_BUFG_REBUF\"): rebuf_to_hrow_map[tile_name] =", "capture group is dived by 50 to get the relative", "'' # REBUF stuff rebuf_key = (feature_path[0], feature_path[1]) if rebuf_key", "here we # map the PIP's feature to \"DIFF_OUT\" if", "connection_box_obj = graph.create_connection_box_object( x_dim=x_dim, y_dim=y_dim ) num_edges = get_number_graph_edges(conn, graph,", "reduce_connection_box('A_L10') 'A' >>> reduce_connection_box('B') 'B' >>> reduce_connection_box('B_L') 'B' \"\"\" box", "\"\"\" Returns physical grid dimensions. \"\"\" cur = conn.cursor() cur.execute(\"SELECT", "= create_get_tile_and_site_as_tile_pkey(cur) get_site_as_tile_wire = create_get_site_as_tile_wire(cur) for node_idx, node in enumerate(graph.nodes):", "def get_tile_name(tile_pkey): cur.execute( \"\"\" SELECT name FROM phy_tile WHERE pkey", "synth_tiles, segment_id ) # Set of (src, sink, switch_id) tuples", "enable_cascout = '{}.CASCOUT_{}_ACTIVE'.format( feature_path[0], m.group(1) ) return ' '.join((feature, enable_cascout))", "IOI_SING are found on top or bottom of the whole", "of placing them, therefore, # when the relative pip is", "== tracks.Direction.TOP: assert top_graph_node_pkey is not None, (tile_type, pin_name) node_mapping[top_graph_node_pkey]", "requires other features to be enabled. 
Some pips imply other", "hilbertcurve.hilbertcurve import HilbertCurve import math import prjxray.db from prjxray.roi import", "WHERE pkey IN ( SELECT phy_tile_pkey FROM tile_map WHERE tile_pkey", "import Roi import prjxray.grid as grid from lib.rr_graph import graph2", "track_pkey, graph_node_type, x_low, x_high, y_low, y_high, ptc, capacitance, resistance) in", "to # VPR. VPR cannot handle duplicate paths with the", "the prjxray db but there # is a tile-wide feature", "hrow_tile, m.group(1) ) ) elif m.group(2) == 'BOT': REBUF_NODES[node_pkey].append( '{}.GCLK{}_ENABLE_ABOVE'.format(rebuf_tile,", "# changes based on which IOI_SING is selected (top or", "graph, node_mapping, grid, synth_tiles): cur = conn.cursor() delayless_switch = graph.get_switch_id('__vpr_delayless_switch__')", "import_graph_edges(conn, graph, node_mapping): # First yield existing edges print('{} Importing", "src_wire_in_tile_pkey, dest_wire_in_tile_pkey = cur.fetchone() cur.execute( \"\"\"SELECT name FROM wire_in_tile WHERE", "x_min, y_min, x_max, y_max = map(int, args.graph_limit.split(',')) roi = Roi(", "DESC LIMIT 1;\") x_max = cur.fetchone()[0] cur.execute(\"SELECT grid_y FROM phy_tile", "for grid_loc in graph.grid: if gnd_block_id == grid_loc.block_type_id: assert gnd_loc", "tile_name.startswith(\"CLK_BUFG_REBUF\"): rebuf_to_hrow_map[tile_name] = { \"above\": maybe_get_clk_hrow(i - 1), \"below\": maybe_get_clk_hrow(i", "= ? );\"\"\", (wire_pkey, ) ) (track_pkey, ) = cur.fetchone()", "value to attach to the source pip name that #", "remove_vpr_tile_prefix(tile_type) pin = m.group(2) tile_pkey, site_as_tile_pkey = get_tile_and_site_as_tile_pkey( node.loc.x_low, node.loc.y_low", "src_wire = src_wire.replace('_0', '_{}'.format(dst_value)) changed_feature = \"{}{}\".format(dst_value, src_wire) feature =", "t: t['port_type'] == 'output', synth_tile['pins'] ) ) ) num_outpad =", "in point_map: point_map[(x, y)] = [] point_map[(x, y)].append(node.id) hilbert_curve =", "changed_feature = \"{}{}\".format(dst_value, src_wire) feature = \"{}{}\".format(unchanged_feature, changed_feature) feature_path =", "DESC LIMIT 1;\") y_max = cur.fetchone()[0] return x_max + 1,", ") return ' '.join((feature, enable_bufhce_in_use, enable_bufhce_zinv_ce)) if BUFG_CLK_IN_REGEX.fullmatch(feature_path[-1]): enable_feature =", "y) not in point_map: point_map[(x, y)] = [] point_map[(x, y)].append(node.id)", "PIN_NAME_TO_PARTS.match(pin_name) assert m is not None, pin_name tile_type = m.group(1)", "pip is traversed, the correct fasm feature needs to be", "each REBUF tile its above and below CLK_HROW tile. Note", "(pip_pkey, ) ) src_wire_in_tile_pkey, dest_wire_in_tile_pkey = cur.fetchone() cur.execute( \"\"\"SELECT name", "pin_name tile_type = m.group(1) tile_type = remove_vpr_tile_prefix(tile_type) pin = m.group(2)", "cur.execute(\"SELECT pkey FROM track WHERE alive = 1;\"): alive_tracks.add(track_pkey) print('{}", "tile_type_pkey FROM phy_tile WHERE pkey IN ( SELECT phy_tile_pkey FROM", "to be enabled. Some pips imply other features. Example: .HCLK_LEAF_CLK_B_BOTL0.HCLK_CK_BUFHCLK10", "add_synthetic_edges(conn, graph, node_mapping, grid, synth_tiles): cur = conn.cursor() delayless_switch =", "x_max, y_min, y_max = cur.fetchone() cur.execute('SELECT idx, info FROM x_list;')", "rrgraph XML. 
# # TODO: This can be removed once", "SELECT phy_tile.name, wire_in_tile.name FROM wire INNER JOIN phy_tile ON phy_tile.pkey", "# # These connections are hard wires that connect IOB33M", "synth_tile in synth_tiles['tiles'].items(): num_inpad = len( list( filter( lambda t:", "switch_pkey is not None if switch_pkey not in switch_name_map: cur", "50 IOBs. # The IOI_SING are found on top or", "to rr_graph XML. \"\"\" import argparse import os.path from hilbertcurve.hilbertcurve", "not in switch_name_map: cur = conn.cursor() cur.execute( \"\"\"SELECT name FROM", "db it # is a pip. Instead of making it", ") ) results = cur.fetchall() assert len(results) == 1, site_as_tile_pkey", "FROM site_pin WHERE site_type_pkey = ? AND name = ?", "grid.dims() for tile in grid.tiles(): gridinfo = grid.gridinfo_at_tilename(tile) if gridinfo.tile_type", "HCLK_CK_BUFHCLK_REGEX = re.compile('HCLK_CK_BUFHCLK[0-9]+') CLK_HROW_CK_MUX_REGEX = re.compile('CLK_HROW_CK_MUX_OUT_([LR])([0-9]+)') CASCOUT_REGEX = re.compile('BRAM_CASCOUT_ADDR((?:BWR)|(?:ARD))ADDRU([0-9]+)') CONNECTION_BOX_FILTER", "# second capture group is dived by 50 to get", "grid_loc in graph.grid: if gnd_block_id == grid_loc.block_type_id: assert gnd_loc is", "import get_wire_pkey, get_track_model import lib.rr_graph_capnp.graph2 as capnp_graph2 from prjxray_constant_site_pins import", "in ['IMUX', 'LOGIC_OUTS', 'CTRL', 'FAN', 'BYP']: if pip in src_wire:", "'pad': 'GND', 'port_type': 'GND', 'is_clock': False, }, ], }, }", "\"\"\" SELECT name FROM segment WHERE pkey = ( SELECT", "wire_in_tile_pkey FROM rebuf_wires) ORDER BY rebuf_nodes.node_pkey;\"\"\" ) for node_pkey, rebuf_tile,", "pips imply other features. Example: .HCLK_LEAF_CLK_B_BOTL0.HCLK_CK_BUFHCLK10 implies: .ENABLE_BUFFER.HCLK_CK_BUFHCLK10 \"\"\" #", "pin['port_type'] == 'input': graph.add_edge( src_node=node_mapping[track_node], sink_node=pin_node[0][0], switch_id=delayless_switch, name='synth_{}_{}'.format(tile_name, pin['wire']), )", "to VCC. # This sets the CE signal to constant", "src_wire: src_wire = src_wire.replace('_0', '_{}'.format(dst_value)) changed_feature = \"{}{}\".format(dst_value, src_wire) feature", "feature_path[-1])) return ' '.join(features) m = HCLK_OUT.fullmatch(feature_path[-1]) if m: return", "First yield existing edges print('{} Importing existing edges.'.format(now())) for edge", "src_node = node_mapping[src_graph_node] sink_node = node_mapping[dest_graph_node] if pip_name is not", ") parser.add_argument( '--write_rr_node_map', required=True, help='Output map of graph_node_pkey to rr", "site_as_tile_pkey) def get_tile_and_site_as_tile_pkey(x, y): return tiles[(x, y)] return get_tile_and_site_as_tile_pkey def", "x_high, y_low, y_high, ptc, capacitance, resistance FROM graph_node WHERE track_pkey", "CLK_HROW_BOT_T and REBUF tiles. returns them in a list sorted", "below. rebuf_and_hrow_tiles = [None] + rebuf_and_hrow_tiles + [None] def maybe_get_clk_hrow(i):", "difference is that the TOP IOI_SING tile shares bits with", "node.id update_connection_box( conn, graph, right_graph_node_pkey, node_idx, connection_box_map ) elif side", "= None switch_id = get_switch_name( conn, graph, switch_name_map, switch_pkey )", "node.loc.y_low)] pin_name = graph.pin_ptc_to_name_map[ (gridloc.block_type_id, node.loc.ptc)] # Synthetic blocks are", "\"\"\" SELECT phy_tile.name, wire_in_tile.name FROM wire INNER JOIN phy_tile ON", "When this PIP is active the IOB operates in the", "# VPR coords terms. 
\"above\" and \"below\" mean the opposite...", "and num_inpad > 1: pin_name = graph.create_pin_name_from_tile_type_sub_tile_num_and_pin( tile_type, pin['z_loc'], wire", "m = rebuf_wire_regexp.fullmatch(rebuf_wire_name) if m.group(2) == 'TOP': REBUF_NODES[node_pkey].append( '{}.GCLK{}_ENABLE_BELOW'.format(rebuf_tile, m.group(1))", "enable_oclkm_feature)) if HCLK_CK_BUFHCLK_REGEX.fullmatch(feature_path[-1]): enable_buffer_feature = '{}.ENABLE_BUFFER.{}'.format( feature_path[0], feature_path[-1] ) return", "info)) return graph2.Channels( chan_width_max=chan_width_max, x_min=x_min, y_min=y_min, x_max=x_max, y_max=y_max, x_list=x_list, y_list=y_list,", "get_site_from_site_as_tile( site_as_tile_pkey ) cur.execute( \"\"\" SELECT pkey FROM wire_in_tile WHERE", "assert result is not None, (wire_in_tile_pkey, tile_pkey) ( top_graph_node_pkey, bottom_graph_node_pkey,", "tile_type = m.group(1) tile_type = remove_vpr_tile_prefix(tile_type) pin = m.group(2) tile_pkey,", "without the need of placing them, therefore, # when the", "Some pips imply other features. Example: .HCLK_LEAF_CLK_B_BOTL0.HCLK_CK_BUFHCLK10 implies: .ENABLE_BUFFER.HCLK_CK_BUFHCLK10 \"\"\"", "= 'X' x_low = max(x_low, 1) elif node_type == graph2.NodeType.CHANY:", "# IOB_PADOUT0->IOB_DIFFI_IN1 # IOB_PADOUT1->IOB_DIFFI_IN0 # # These connections are hard", "get_pip_wire_names(pip_pkey) if not backward: pip_name = '{}.{}.{}'.format(tile_name, dest_net, src_net) else:", "in node_mapping: continue if pip_pkey is not None: tile_name =", "WHERE pkey = ? ) ) ) )\"\"\", (graph_node_pkey, )", "? AND name = ? ) AND site_pkey = ?", "# VPR. VPR cannot handle duplicate paths with the same", "= graph.loc_map[(node.loc.x_low, node.loc.y_low)] pin_name = graph.pin_ptc_to_name_map[ (gridloc.block_type_id, node.loc.ptc)] # Synthetic", "= hclk_y while hclk_cmt_x > 0: hclk_cmt_x -= 1 gridinfo", "sizing=graph2.SwitchSizing( mux_trans_size=0, buf_size=0, ), ) ) # Mapping of graph_node.pkey", "tile, while the BOTTOM IOI_SING # shares bits with the", "BUFG_CLK_IN_REGEX = re.compile('CLK_HROW_CK_IN_[LR][0-9]+') BUFG_CLK_OUT_REGEX = re.compile('CLK_HROW_R_CK_GCLK[0-9]+') CCIO_ACTIVE_REGEX = re.compile('HCLK_CMT_CCIO[0-9]+') HCLK_OUT", "== tracks.Direction.RIGHT: assert right_graph_node_pkey is not None, (tile_type, pin_name) node_mapping[right_graph_node_pkey]", "print('{} generating routing graph for ROI.'.format(now())) elif args.graph_limit: use_roi =", "on which IOI_SING is selected (top or bottom) # #", "cur.fetchall()] def populate_bufg_rebuf_map(conn): global REBUF_NODES REBUF_NODES = {} global REBUF_SOURCES", "# # Example: IOI_OLOGIC0_D1.IOI_IMUX34_0 -> IOI_OLOGIC0_D1.IOI_IMUX34_1 src_value = '1' if", "the architecture as it is defined one # level up", "features = [feature] features.append( '{}.{}_ACTIVE'.format(feature_path[0], feature_path[-1]) ) features.append('{}.{}_USED'.format(feature_path[0], feature_path[-1])) return", "if feature: yield ( src_node, sink_node, switch_id, (('fasm_features', feature), )", "FROM phy_tile ORDER BY grid_x DESC LIMIT 1;\") x_max =", "cur.execute(\"\"\" SELECT name, internal_capacitance, drive_resistance, intrinsic_delay, penalty_cost, switch_type FROM switch;\"\"\"):", "= HCLK_CMT_TILES[(hclk_tile, lr)] return ['{}.HCLK_CMT_CK_BUFHCLK{}_USED'.format(hclk_cmt_tile, hclk_number)] def check_feature(feature): \"\"\" Check", "serialize output to rr_graph XML. 
\"\"\" import argparse import os.path", "get_site_as_tile_wire = create_get_site_as_tile_wire(cur) for node_idx, node in enumerate(graph.nodes): if node.type", "SELECT pkey, site_as_tile_pkey, grid_x, grid_y FROM tile;\"\"\"): tiles[(grid_x, grid_y)] =", "IOI_SING tiles have bits in common with the IOI tiles.", "REBUF tiles. rebuf_and_hrow_tiles = get_clk_hrow_and_rebuf_tiles_sorted(cur) # Append None on both", "x1=x_min, y1=y_min, x2=x_max, y2=y_max, ) else: use_roi = False roi", "?; \"\"\", (tile_pkey, ) ) return cur.fetchone()[0] return get_tile_name def", "# Synthetic blocks are handled below. if pin_name.startswith('SYN-'): set_connection_box( graph,", "Adding synthetic edges'.format(now())) add_synthetic_edges(conn, graph, node_mapping, grid, synth_tiles) print('{} Creating", "if args.synth_tiles: use_roi = True with open(args.synth_tiles) as f: synth_tiles", "elif pin['port_type'] == 'GND': tile_type = 'SYN-GND' wire = 'GND'", "REBUF_NODES[node_pkey].append( '{}.GCLK{}_ENABLE_BELOW'.format(rebuf_tile, m.group(1)) ) hrow_tile = rebuf_to_hrow_map[rebuf_tile][\"below\"] if hrow_tile is", ") src_node = node_mapping[src_graph_node] sink_node = node_mapping[dest_graph_node] if pip_name is", "from prjxray.roi import Roi import prjxray.grid as grid from lib.rr_graph", "tile_map WHERE tile_pkey = ? ) );\"\"\", (pin, tile_pkey) )", "track_node in node_mapping, (track_node, track_pkey) if wire == 'inpad' and", "False, side def import_tracks(conn, alive_tracks, node_mapping, graph, default_segment_id): cur =", "of a normal IOI TILE. # # The following, is", "1: pin_name = graph.create_pin_name_from_tile_type_sub_tile_num_and_pin( tile_type, pin['z_loc'], wire ) elif wire", "(site_type_pkey, pin, site_pkey) ) results = cur.fetchall() assert len(results) ==", "node map.'.format(now())) with open(args.write_rr_node_map, 'wb') as f: pickle.dump(node_mapping, f) print('{}", "node_mapping: node_mapping[k] = node_remap(node_mapping[k]) print('{} Writing node map.'.format(now())) with open(args.write_rr_node_map,", "None: cur.execute( \"\"\" SELECT grid_x, grid_y FROM phy_tile WHERE pkey", "( top_graph_node_pkey, bottom_graph_node_pkey, left_graph_node_pkey, right_graph_node_pkey ) = result side =", "reduce_connection_box('IMUX10') 'IMUX' >>> reduce_connection_box('BRAM_ADDR') 'IMUX' >>> reduce_connection_box('A_L10') 'A' >>> reduce_connection_box('B')", "feature.split('.') # IOB_DIFFO_OUT0->IOB_DIFFO_IN1 # # When this PIP is active", "'0' if is_bottom_sing else '1' unchanged_feature = \"{}{}{}{}\".format( m.group(1), m.group(2),", "FROM graph_node WHERE pkey = ? ) ) ) )\"\"\",", "all connection box types. 
\"\"\" cur = conn.cursor() cur.execute( \"\"\"", "h in range(hilbert_curve.max_h + 1): coord = tuple(hilbert_curve.coordinates_from_distance(h)) if coord", "results[0][0] return wire_in_tile_pkey return get_site_as_tile_wire def import_graph_nodes(conn, graph, node_mapping, connection_box_map):", "# the bottom half of a normal IOI tile, while", "tile_pkey, site_as_tile_pkey, grid_x, grid_y in cur.execute(\"\"\" SELECT pkey, site_as_tile_pkey, grid_x,", "pkey IN ( SELECT DISTINCT wire_in_tile_pkey FROM wire WHERE pkey", "returns them in a list sorted according to their Y", "= [] for idx, info in cur: x_list.append(graph2.ChannelList(idx, info)) cur.execute('SELECT", "'tiles': { \"VCC\": { 'loc': vcc_loc, 'pins': [ { 'wire':", ") print('{} Starting routing import'.format(now())) args = parser.parse_args() db =", "pin_name) node_mapping[right_graph_node_pkey] = node.id update_connection_box( conn, graph, right_graph_node_pkey, node_idx, connection_box_map", "= re.compile('HCLK_CMT_CCIO[0-9]+') HCLK_OUT = re.compile('CLK_HROW_CK_HCLK_OUT_([LR])([0-9]+)') IOI_OCLK = re.compile('IOI_OCLK_([01])') # Regex", "Synthetic blocks are handled below. if pin_name.startswith('SYN-'): set_connection_box( graph, node_idx,", "re.compile(r'^([^\\.]+)\\.([^\\]]+)\\[0\\]$') def set_connection_box( graph, node_idx, grid_x, grid_y, box_id, site_pin_delay ):", "?;\"\"\", (dest_wire_in_tile_pkey, ) ) (dest_net, ) = cur.fetchone() return (src_net,", "'IMUX' >>> reduce_connection_box('IMUX1') 'IMUX' >>> reduce_connection_box('IMUX10') 'IMUX' >>> reduce_connection_box('BRAM_ADDR') 'IMUX'", "graph for ROI.'.format(now())) elif args.graph_limit: use_roi = True x_min, y_min,", "continue if dest_graph_node not in node_mapping: continue if pip_pkey is", "= rebuf_and_hrow_tiles[i] if tile is not None and tile.startswith(\"CLK_HROW\"): return", "\"\"\")): if src_graph_node not in node_mapping: continue if dest_graph_node not", "None: segment_name = result[0] segment_id = graph.get_segment_id_from_name(segment_name) else: segment_id =", "IOB_PADOUT0->IOB_DIFFI_IN1 # IOB_PADOUT1->IOB_DIFFI_IN0 # # These connections are hard wires", "= ( SELECT canon_phy_tile_pkey FROM track WHERE pkey = ?", "feature_path[0], feature_path[-1] ) return ' '.join((feature, enable_feature)) if CCIO_ACTIVE_REGEX.fullmatch(feature_path[-1]): features", "CLBLL_LL_A1) PIN_NAME_TO_PARTS = re.compile(r'^([^\\.]+)\\.([^\\]]+)\\[0\\]$') def set_connection_box( graph, node_idx, grid_x, grid_y,", "num_nodes=len(capnp_graph.graph.nodes), nodes_obj=yield_nodes(capnp_graph.graph.nodes), num_edges=num_edges, edges_obj=import_graph_edges(conn, graph, node_mapping), node_remap=node_remap, ) for k", "TOP IOI_SING tile shares bits with # the bottom half", "bits in common with the IOI tiles. # # The", "ON phy_tile.pkey = wire.phy_tile_pkey WHERE wire.wire_in_tile_pkey IN (SELECT wire_in_tile_pkey FROM", "node in enumerate(graph.nodes): if node.type not in (graph2.NodeType.IPIN, graph2.NodeType.OPIN): continue", "m.group(6).replace('_SING', '') for pip in ['IMUX', 'LOGIC_OUTS', 'CTRL', 'FAN', 'BYP']:", "return None # Assign each REBUF tile its above and", "INNER JOIN wire_in_tile ON wire_in_tile.pkey = wire.wire_in_tile_pkey WHERE wire.node_pkey =", "there # is a tile-wide feature named \"DIFF_OUT\". # #", "= ( SELECT wire_in_tile_pkey FROM wire WHERE pkey = (", "gridinfo.tile_type not in ['CLK_HROW_BOT_R', 'CLK_HROW_TOP_R']: continue hclk_x, hclk_y = grid.loc_of_tilename(tile)", "database and serialize output to rr_graph XML. 
\"\"\" import argparse", "lambda t: t['port_type'] == 'output', synth_tile['pins'] ) ) ) num_outpad", "else: cur.execute( \"\"\" SELECT pkey FROM wire_in_tile WHERE name =", "WHERE pkey = ( SELECT wire_in_tile_pkey FROM wire WHERE pkey", "and OPIN rr_nodes should already be present from the input", "it a pseudo-pip we simply reject fasm # features here.", "as bar: for idx, node in enumerate(nodes): yield node if", "switch_pkey not in switch_name_map: cur = conn.cursor() cur.execute( \"\"\"SELECT name", "IN ( SELECT phy_tile_pkey FROM tile_map WHERE tile_pkey = ?", "REBUF_NODES = {} global REBUF_SOURCES REBUF_SOURCES = {} rebuf_wire_regexp =", "= cur.fetchone()[0] set_connection_box( graph, node_idx, grid_x, grid_y, box_id, site_pin_delay )", "graph.loc_map[(node.loc.x_low, node.loc.y_low)] pin_name = graph.pin_ptc_to_name_map[ (gridloc.block_type_id, node.loc.ptc)] # Synthetic blocks", "from prjxray-arch-import' ) parser.add_argument( '--graph_limit', help='Limit grid to specified dimensions", "# IOB_PADOUT1->IOB_DIFFI_IN0 # # These connections are hard wires that", "alive_tracks, node_mapping, graph, default_segment_id): cur = conn.cursor() cur2 = conn.cursor()", "hclk_cmt_x < x_max: hclk_cmt_x += 1 gridinfo = grid.gridinfo_at_loc((hclk_cmt_x, hclk_cmt_y))", "None, ( tile_name, pin['wire'], wire_pkey ) elif pin['port_type'] == 'VCC':", "def create_node_remap(nodes, channels_obj): N = 2 p = math.ceil(math.log2(max(channels_obj.x_max, channels_obj.y_max)))", "'{}.GCLK{}_ENABLE_BELOW'.format(rebuf_tile, m.group(1)) ) hrow_tile = rebuf_to_hrow_map[rebuf_tile][\"below\"] if hrow_tile is not", "switch_id = get_switch_name( conn, graph, switch_name_map, switch_pkey ) src_node =", "database. IPIN and OPIN rr_nodes should already be present from", ") # Set of (src, sink, switch_id) tuples that pip", "= grid.tilename_at_loc( (hclk_cmt_x, hclk_cmt_y) ) break hclk_cmt_x = hclk_x while", "for node_pkey, rebuf_tile, rebuf_wire_name in cur: if node_pkey not in", "== \"IOB_PADOUT0\" and feature_path[1] == \"IOB_DIFFI_IN1\": return '' if feature_path[2]", "if is_bottom_sing else '1' unchanged_feature = \"{}{}{}{}\".format( m.group(1), m.group(2), m.group(3),", "cur.fetchone()[0] return x_max + 1, y_max + 1 def find_constant_network(graph):", "Regex for [LR]IOI_SING tiles IOI_SITE_PIPS = ['OLOGIC', 'ILOGIC', 'IDELAY', 'OCLK_',", "in the prjxray db it # is a pip. Instead", "unused in arch xml, and so # were not emitted", "node_remap=node_remap, ) for k in node_mapping: node_mapping[k] = node_remap(node_mapping[k]) print('{}", "default_segment_id node_type = graph2.NodeType(graph_node_type) if node_type == graph2.NodeType.CHANX: direction =", "IOI_SING is selected (top or bottom) # # Example: IOI_OLOGIC0_D1.IOI_IMUX34_0", "synth_tiles = { 'tiles': { \"VCC\": { 'loc': vcc_loc, 'pins':", "= ?;\"\"\", (node_pkey, ) ) for tile, wire_name in cur:", "'{}.CASCOUT_{}_ACTIVE'.format( feature_path[0], m.group(1) ) return ' '.join((feature, enable_cascout)) parts =", "tile_pkey) ) results = cur.fetchall() assert len(results) == 1 wire_in_tile_pkey", "= cur.fetchone() switch_id = graph.get_switch_id(switch_name) switch_name_map[switch_pkey] = switch_id else: switch_id", "the IOB operates in the differential output mode. # There", "None, (tile_type, pin_name) node_mapping[right_graph_node_pkey] = node.id update_connection_box( conn, graph, right_graph_node_pkey,", "list. 
\"\"\" tile = rebuf_and_hrow_tiles[i] if tile is not None", "# This is the value to attach to the source", "' '.join(features) m = HCLK_OUT.fullmatch(feature_path[-1]) if m: return ' '.join(", "other features. Example: .HCLK_LEAF_CLK_B_BOTL0.HCLK_CK_BUFHCLK10 implies: .ENABLE_BUFFER.HCLK_CK_BUFHCLK10 \"\"\" # IOI_SING tiles", "tiles IOI_SITE_PIPS = ['OLOGIC', 'ILOGIC', 'IDELAY', 'OCLK_', 'OCLKM_'] IOI_SING_REGEX =", "WITH rebuf_wires(wire_in_tile_pkey) AS ( SELECT pkey FROM wire_in_tile WHERE name", "help='Project X-Ray Database' ) parser.add_argument('--part', required=True, help='FPGA part') parser.add_argument( '--read_rr_graph',", "synthetic edges'.format(now())) add_synthetic_edges(conn, graph, node_mapping, grid, synth_tiles) print('{} Creating channels.'.format(now()))", "as grid from lib.rr_graph import graph2 from lib.rr_graph import tracks", "rebuf_wire_name) for node_pkey in REBUF_NODES: cur.execute( \"\"\" SELECT phy_tile.name, wire_in_tile.name", "src_node=node_mapping[track_node], sink_node=pin_node[0][0], switch_id=delayless_switch, name='synth_{}_{}'.format(tile_name, pin['wire']), ) elif pin['port_type'] in ['VCC',", "for t in cur.fetchall()] def populate_bufg_rebuf_map(conn): global REBUF_NODES REBUF_NODES =", "node_mapping, use_roi, roi, synth_tiles, segment_id ): cur = conn.cursor() cur.execute(\"\"\"SELECT", ") ) grid_x, grid_y = cur.fetchone() cur.execute( \"SELECT wire_in_tile_pkey FROM", "is not None: segment_name = result[0] segment_id = graph.get_segment_id_from_name(segment_name) else:", "FROM track WHERE alive = 1;\"): alive_tracks.add(track_pkey) print('{} Importing alive", "(tile_type, pin_name) node_mapping[left_graph_node_pkey] = node.id update_connection_box( conn, graph, left_graph_node_pkey, node_idx,", "sites are now routed through, without the need of placing", "tile-wide feature named \"DIFF_OUT\". # # The \"DIFF_OUT\" cannot be", "m.group(3), m.group(4) ) src_wire = m.group(6).replace('_SING', '') for pip in", "'{}.{}_ACTIVE'.format( feature_path[0], feature_path[-1] ) return ' '.join((feature, enable_feature)) if BUFG_CLK_OUT_REGEX.fullmatch(feature_path[-1]):", "src_net) else: pip_name = '{}.{}.{}'.format(tile_name, src_net, dest_net) else: pip_name =", "lr, hclk_number): if (hclk_tile, lr) not in HCLK_CMT_TILES: return []", "to change the edge feature to accomodate this # need,", "BY grid_y DESC LIMIT 1;\") y_max = cur.fetchone()[0] return x_max", "this also connects the synthetic IO tiles to the routing", "IN ( SELECT tile_type_pkey FROM phy_tile WHERE pkey IN (", "FROM track WHERE pkey = ? )\"\"\", (track_pkey, ) )", "= ( SELECT phy_tile_pkey FROM wire WHERE pkey = ?", "'VCC', 'port_type': 'VCC', 'is_clock': False, }, ], }, \"GND\": {", "update_connection_box( conn, graph, graph_node_pkey, node_idx, connection_box_map ): \"\"\" Update connection", "else: pip_name = '{}.{}.{}'.format(tile_name, src_net, dest_net) else: pip_name = None", "def create_get_pip_wire_names(conn): cur = conn.cursor() @functools.lru_cache(maxsize=None) def get_pip_wire_names(pip_pkey): cur.execute( \"\"\"SELECT", "# Then yield edges from database. 
cur = conn.cursor() cur.execute(\"SELECT", "name of CLK_HROW tile only if its there on the", "track_pkey is not None, ( tile_name, pin['wire'], wire_pkey ) elif", "these synthetic tiles, search the input rr graph for the", "Roi( db=db, x1=x_min, y1=y_min, x2=x_max, y2=y_max, ) else: use_roi =", "graph2.Switch( id=None, name=name, type=graph2.SwitchType[switch_type.upper()], timing=graph2.SwitchTiming( r=drive_resistance, c_in=0.0, c_out=0.0, c_internal=internal_capacitance, t_del=intrinsic_delay,", "to rr node id. node_mapping = {} print('{} Creating connection", "cur.execute(\"\"\" SELECT src_graph_node_pkey, dest_graph_node_pkey FROM graph_edge; \"\"\"): if src_graph_node not", "normal IOI TILE. # # The following, is to change", "# shares bits with the top half of a normal", "= CLK_HROW_CK_MUX_REGEX.fullmatch(feature_path[-1]) if m: x_loc_str = m.group(1) if 'L' in", "SELECT pkey, tile_type_pkey, name FROM wire_in_tile WHERE pkey IN (", "ids for all connection box types. \"\"\" cur = conn.cursor()", "if enabling this feature requires other features to be enabled.", "['OLOGIC', 'ILOGIC', 'IDELAY', 'OCLK_', 'OCLKM_'] IOI_SING_REGEX = re.compile( r'([RL]IOI3_SING_X[0-9]+Y)([0-9]+)(\\.IOI_)({})([01])(.*)'.format( \"|\".join(IOI_SITE_PIPS)", "cur2.fetchone() if result: canonical_loc = graph2.CanonicalLoc(x=result[0], y=result[1]) track = tracks.Track(", "'R'] = grid.tilename_at_loc( (hclk_cmt_x, hclk_cmt_y) ) break def find_hclk_cmt_hclk_feature(hclk_tile, lr,", "opposite... rebuf_to_hrow_map = {} for i, tile_name in enumerate(rebuf_and_hrow_tiles): if", "feature_path[0], m.group(1) ) return ' '.join((feature, enable_cascout)) parts = feature.split('.')", "all track graph nodes and add them. print('{} Creating tracks'.format(now()))", "{} rebuf_wire_regexp = re.compile( 'CLK_BUFG_REBUF_R_CK_GCLK([0-9]+)_(BOT|TOP)' ) cur = conn.cursor() #", "tile_name in enumerate(rebuf_and_hrow_tiles): if tile_name is not None and tile_name.startswith(\"CLK_BUFG_REBUF\"):", "( SELECT connection_box_wire_pkey FROM graph_node WHERE connection_box_wire_pkey IS NOT NULL", "The relevant features are: # - IN_USE: to enable the", "in block_types gnd_block_id = block_types['SYN-GND'] vcc_block_id = block_types['SYN-VCC'] gnd_loc =", "# features here. if feature_path[2] == \"IOB_PADOUT0\" and feature_path[1] ==", ">>> reduce_connection_box('B') 'B' >>> reduce_connection_box('B_L') 'B' \"\"\" box = CONNECTION_BOX_FILTER.match(box).group(1)", "feature_path[1] == \"IOB_DIFFI_IN1\": return '' if feature_path[2] == \"IOB_PADOUT1\" and", "wire WHERE wire_in_tile_pkey = ? AND tile_pkey = ?;\"\"\", (wire_in_tile_pkey,", "if box.endswith('_L'): box = box.replace('_L', '') return box REBUF_NODES =", ");\"\"\", (pin, tile_pkey) ) results = cur.fetchall() assert len(results) ==", "half of a normal IOI tile, while the BOTTOM IOI_SING", "timing=graph2.SwitchTiming( r=drive_resistance, c_in=0.0, c_out=0.0, c_internal=internal_capacitance, t_del=intrinsic_delay, p_cost=penalty_cost, ), sizing=graph2.SwitchSizing( mux_trans_size=0,", "wire WHERE pkey = ? 
);\"\"\", (wire_pkey, ) ) (track_pkey,", "[feature] + find_hclk_cmt_hclk_feature( feature_path[0], m.group(1), m.group(2) ) ) m =", "grid.gridinfo_at_loc((hclk_cmt_x, hclk_cmt_y)) if gridinfo.tile_type == 'HCLK_CMT_L': HCLK_CMT_TILES[tile, 'R'] = grid.tilename_at_loc(", "following, is to change the edge feature to accomodate this", "== 1, site_as_tile_pkey return results[0] @functools.lru_cache(maxsize=0) def get_site_as_tile_wire(site_as_tile_pkey, pin): site_type_pkey,", ") def create_connection_boxes(conn, graph): \"\"\" Assign connection box ids for", "graph.add_track( track=track, segment_id=segment_id, ptc=ptc, timing=graph2.NodeTiming( r=resistance, c=capacitance, ), canonical_loc=canonical_loc )", "switch_id def create_get_tile_name(conn): cur = conn.cursor() @functools.lru_cache(maxsize=None) def get_tile_name(tile_pkey): cur.execute(", "rebuf_wire_regexp.fullmatch(rebuf_wire_name) if m.group(2) == 'TOP': REBUF_NODES[node_pkey].append( '{}.GCLK{}_ENABLE_BELOW'.format(rebuf_tile, m.group(1)) ) hrow_tile", "should have these synthetic tiles, search the input rr graph", "HCLK_OUT = re.compile('CLK_HROW_CK_HCLK_OUT_([LR])([0-9]+)') IOI_OCLK = re.compile('IOI_OCLK_([01])') # Regex for [LR]IOI_SING", "this connection as a PIP but in the prjxray db", "grid_y = cur.fetchone() cur.execute( \"SELECT wire_in_tile_pkey FROM wire WHERE pkey", "All arches should have these synthetic tiles, search the input", "idx, info in cur: y_list.append(graph2.ChannelList(idx, info)) return graph2.Channels( chan_width_max=chan_width_max, x_min=x_min,", ");\"\"\", (wire_pkey, ) ) (track_pkey, ) = cur.fetchone() assert track_pkey", "ORDER BY grid_y DESC; \"\"\" ) return [t[0] for t", "for edge in graph.edges: yield (edge.src_node, edge.sink_node, edge.switch_id, None) #", "args = parser.parse_args() db = prjxray.db.Database(args.db_root, args.part) populate_hclk_cmt_tiles(db) synth_tiles =", "y_low, y_high, ptc, capacitance, resistance) in progressbar_utils.progressbar(cur.execute(\"\"\" SELECT pkey, track_pkey,", "get_track_model(conn, track_pkey) option = list( tracks_model.get_tracks_for_wire_at_coord( tuple(synth_tile['loc']) ).values() ) assert", "GND tiles and create synth_tiles input. 
HCLK_CMT_TILES = {}


def populate_hclk_cmt_tiles(db):
    global HCLK_CMT_TILES
    HCLK_CMT_TILES = {}

    grid = db.grid()

    _, x_max, _, _ = grid.dims()

    for tile in grid.tiles():
        gridinfo = grid.gridinfo_at_tilename(tile)

        if gridinfo.tile_type not in ['CLK_HROW_BOT_R', 'CLK_HROW_TOP_R']:
            continue

        hclk_x, hclk_y = grid.loc_of_tilename(tile)

        hclk_cmt_x = hclk_x
        hclk_cmt_y = hclk_y

        while hclk_cmt_x > 0:
            hclk_cmt_x -= 1

            gridinfo = grid.gridinfo_at_loc((hclk_cmt_x, hclk_cmt_y))

            if gridinfo.tile_type == 'HCLK_CMT':
                HCLK_CMT_TILES[tile, 'L'] = grid.tilename_at_loc(
                    (hclk_cmt_x, hclk_cmt_y)
                )
                break

        hclk_cmt_x = hclk_x

        while hclk_cmt_x < x_max:
            hclk_cmt_x += 1

            gridinfo = grid.gridinfo_at_loc((hclk_cmt_x, hclk_cmt_y))

            if gridinfo.tile_type == 'HCLK_CMT_L':
                HCLK_CMT_TILES[tile, 'R'] = grid.tilename_at_loc(
                    (hclk_cmt_x, hclk_cmt_y)
                )
                break


def find_hclk_cmt_hclk_feature(hclk_tile, lr, hclk_number):
    if (hclk_tile, lr) not in HCLK_CMT_TILES:
        return []

    hclk_cmt_tile = HCLK_CMT_TILES[(hclk_tile, lr)]

    return ['{}.HCLK_CMT_CK_BUFHCLK{}_USED'.format(hclk_cmt_tile, hclk_number)]
def check_feature(feature):
    """ Check if enabling this feature requires other features to be enabled.

    Some pips imply other features.  Example:

    .HCLK_LEAF_CLK_B_BOTL0.HCLK_CK_BUFHCLK10

    implies:

    .ENABLE_BUFFER.HCLK_CK_BUFHCLK10
    """
    # IOI_SING tiles have bits in common with the IOI tiles.
    #
    # The difference is that the TOP IOI_SING tile shares bits with
    # the bottom half of a normal IOI tile, while the BOTTOM IOI_SING
    # shares bits with the top half of a normal IOI TILE.
    #
    # The following changes the edge feature to accommodate this need, as
    # the IOI_SING tiles are found on the top or bottom of the whole
    # IOI/IOB column.
    m = IOI_SING_REGEX.fullmatch(feature)
    if m:
        # Each clock region spans a total of 50 IOBs.  The IOI_SING are
        # found on top or bottom of the whole IOI/IOB column.  The Y
        # coordinate identified with the second capture group is used to
        # get the relative position of the IOI_SING within the clock
        # region column.
        is_bottom_sing = int(m.group(2)) % 50 == 0

        # This is the value to attach to the source pip name that
        # changes based on which IOI_SING is selected (top or bottom)
        #
        # Example: IOI_OLOGIC0_D1.IOI_IMUX34_0 -> IOI_OLOGIC0_D1.IOI_IMUX34_1
        src_value = '1' if is_bottom_sing else '0'

        # This is the value to attach to the IOI_SITE_PIPS names
        # in the destination wire of the pip
        #
        # Example: IOI_OLOGIC0 -> IOI_OLOGIC1
        dst_value = '0' if is_bottom_sing else '1'

        unchanged_feature = "{}{}{}{}".format(
            m.group(1), m.group(2), m.group(3), m.group(4)
        )

        src_wire = m.group(6).replace('_SING', '')

        for pip in ['IMUX', 'LOGIC_OUTS', 'CTRL', 'FAN', 'BYP']:
            if pip in src_wire:
                src_wire = src_wire.replace('_0', '_{}'.format(src_value))

        if 'IOI_OCLK' in src_wire:
            src_wire = src_wire.replace('_0', '_{}'.format(dst_value))

        changed_feature = "{}{}".format(dst_value, src_wire)
        feature = "{}{}".format(unchanged_feature, changed_feature)

    feature_path = feature.split('.')

    # IOB_DIFFO_OUT0->IOB_DIFFO_IN1
    #
    # When this PIP is active the IOB operates in the differential output
    # mode.  There is no feature associated with this pip in the prjxray db
    # but there is a tile-wide feature named "DIFF_OUT".
    #
    # The "DIFF_OUT" cannot be set in the architecture as it is defined one
    # level up in the hierarchy (it's tile-wide, not site-wide).  So here we
    # map the PIP's feature to the tile-wide one.
    if feature_path[2] == "IOB_DIFFO_OUT0" and \
       feature_path[1] == "IOB_DIFFO_IN1":
        return '{}.OUT_DIFF'.format(feature_path[0])

    # IOB_PADOUT0->IOB_DIFFI_IN1
    # IOB_PADOUT1->IOB_DIFFI_IN0
    #
    # These connections are hard wires that connect IOB33M and IOB33S
    # sites.  They are used in the differential input mode.
    #
    # Vivado does not report this connection as a PIP but in the prjxray db
    # it is a pip.  Instead of making it a pseudo-pip we simply reject fasm
    # features here.
    if feature_path[2] == "IOB_PADOUT0" and feature_path[1] == "IOB_DIFFI_IN1":
        return ''
    if feature_path[2] == "IOB_PADOUT1" and feature_path[1] == "IOB_DIFFI_IN0":
        return ''
    # REBUF stuff
    rebuf_key = (feature_path[0], feature_path[1])
    if rebuf_key in REBUF_SOURCES:
        return ' '.join([feature] + REBUF_NODES[REBUF_SOURCES[rebuf_key]])

    m = IOI_OCLK.fullmatch(feature_path[1])
    if m:
        enable_oclkm_feature = '{}.IOI_OCLKM_{}.{}'.format(
            feature_path[0], m.group(1), feature_path[-1]
        )

        return ' '.join((feature, enable_oclkm_feature))

    if HCLK_CK_BUFHCLK_REGEX.fullmatch(feature_path[-1]):
        enable_buffer_feature = '{}.ENABLE_BUFFER.{}'.format(
            feature_path[0], feature_path[-1]
        )

        return ' '.join((feature, enable_buffer_feature))

    # BUFHCE sites are now routed through, without the need of placing
    # them; therefore, when the relative pip is traversed, the correct fasm
    # feature needs to be added.
    # The relevant features are:
    #    - IN_USE: to enable the BUFHCE site
    #    - ZINV_CE: to disable the inverter on the CE input which is
    #               connected to VCC.  This sets the CE signal to constant 1
    m = CLK_HROW_CK_MUX_REGEX.fullmatch(feature_path[-1])
    if m:
        x_loc_str = m.group(1)
        if 'L' in x_loc_str:
            x_loc = 0
        elif 'R' in x_loc_str:
            x_loc = 1
        else:
            assert False, "Unable to determine X location of BUFHCE"

        y_loc = m.group(2)
        bufhce_loc = 'BUFHCE_X{}Y{}'.format(x_loc, y_loc)

        enable_bufhce_in_use = '{}.BUFHCE.{}.IN_USE'.format(
            feature_path[0], bufhce_loc
        )
        enable_bufhce_zinv_ce = '{}.BUFHCE.{}.ZINV_CE=1\'b1'.format(
            feature_path[0], bufhce_loc
        )

        return ' '.join((feature, enable_bufhce_in_use, enable_bufhce_zinv_ce))

    if BUFG_CLK_IN_REGEX.fullmatch(feature_path[-1]):
        enable_feature = '{}.{}_ACTIVE'.format(
            feature_path[0], feature_path[-1]
        )

        return ' '.join((feature, enable_feature))

    if BUFG_CLK_OUT_REGEX.fullmatch(feature_path[-1]):
        enable_feature = '{}.{}_ACTIVE'.format(
            feature_path[0], feature_path[-1]
        )

        return ' '.join((feature, enable_feature))

    if CCIO_ACTIVE_REGEX.fullmatch(feature_path[-1]):
        features = [feature]
        features.append(
            '{}.{}_ACTIVE'.format(feature_path[0], feature_path[-1])
        )
        features.append('{}.{}_USED'.format(feature_path[0], feature_path[-1]))

        return ' '.join(features)

    m = HCLK_OUT.fullmatch(feature_path[-1])
    if m:
        return ' '.join(
            [feature] + find_hclk_cmt_hclk_feature(
                feature_path[0], m.group(1), m.group(2)
            )
        )

    m = CASCOUT_REGEX.fullmatch(feature_path[-2])
    if m:
        enable_cascout = '{}.CASCOUT_{}_ACTIVE'.format(
            feature_path[0], m.group(1)
        )

        return ' '.join((feature, enable_cascout))

    parts = feature.split('.')

    wire_feature = feature_when_routed(parts[1])
    if wire_feature is not None:
        return '{} {}.{}'.format(feature, parts[0], wire_feature)

    return feature
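# A minimal usage sketch for check_feature().  The tile/wire names below
# are illustrative only, and we assume the feature is not matched by the
# REBUF map:
#
#   check_feature(
#       'CLK_HROW_TOP_R_X60Y130.HCLK_LEAF_CLK_B_BOTL0.HCLK_CK_BUFHCLK10')
#   -> 'CLK_HROW_TOP_R_X60Y130.HCLK_LEAF_CLK_B_BOTL0.HCLK_CK_BUFHCLK10 '
#      'CLK_HROW_TOP_R_X60Y130.ENABLE_BUFFER.HCLK_CK_BUFHCLK10'
#
# i.e. the returned string is the original feature plus every implied
# feature, space separated; an empty string means "reject this feature".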
# CLBLL_L.CLBLL_LL_A1[0] -> (CLBLL_L, CLBLL_LL_A1)
PIN_NAME_TO_PARTS = re.compile(r'^([^\.]+)\.([^\]]+)\[0\]$')


def set_connection_box(
        graph, node_idx, grid_x, grid_y, box_id, site_pin_delay
):
    """ Assign a connection box to an IPIN node. """
    node_dict = graph.nodes[node_idx]._asdict()
    node_dict['connection_box'] = graph2.ConnectionBox(
        x=grid_x,
        y=grid_y,
        id=box_id,
        site_pin_delay=site_pin_delay,
    )
    graph.nodes[node_idx] = graph2.Node(**node_dict)
\"\"\" node_dict = graph.nodes[node_idx]._asdict() node_dict['connection_box'] = graph2.ConnectionBox(", "x_low=x_low, x_high=x_high, y_low=y_low, y_high=y_high, ) assert graph_node_pkey not in node_mapping", "with # the bottom half of a normal IOI tile,", "connection_box_wire_pkey FROM graph_node WHERE pkey = ?\"\"\", (graph_node_pkey, ) )", "arch xml, and so # were not emitted in rrgraph", "1;\"): alive_tracks.add(track_pkey) print('{} Importing alive tracks'.format(now())) import_tracks(conn, alive_tracks, node_mapping, graph,", "'.join((feature, enable_buffer_feature)) # BUFHCE sites are now routed through, without", "node_idx, grid_x, grid_y, box_id, site_pin_delay ) def create_get_tile_and_site_as_tile_pkey(cur): tiles =", "cur.fetchone() cur.execute('SELECT idx, info FROM x_list;') x_list = [] for", "= rebuf_to_hrow_map[rebuf_tile][\"below\"] if hrow_tile is not None: REBUF_NODES[node_pkey].append( \"{}.CLK_HROW_R_CK_GCLK{}_ACTIVE\".format( hrow_tile,", "feature_path[-1]) ) features.append('{}.{}_USED'.format(feature_path[0], feature_path[-1])) return ' '.join(features) m = HCLK_OUT.fullmatch(feature_path[-1])", "not in point_map: continue for old_id in point_map[coord]: id_map[old_id] =", "phy_tile.pkey = wire.phy_tile_pkey WHERE wire.wire_in_tile_pkey IN (SELECT wire_in_tile_pkey FROM rebuf_wires)", "dest_net) else: pip_name = None switch_id = get_switch_name( conn, graph,", "not in point_map: point_map[(x, y)] = [] point_map[(x, y)].append(node.id) hilbert_curve", "yield existing edges print('{} Importing existing edges.'.format(now())) for edge in", "for idx, info in cur: x_list.append(graph2.ChannelList(idx, info)) cur.execute('SELECT idx, info", "uri=True) as conn: populate_bufg_rebuf_map(conn) cur = conn.cursor() for name, internal_capacitance,", ") num_edges = get_number_graph_edges(conn, graph, node_mapping) print('{} Serializing to disk.'.format(now()))", "= ( SELECT node_pkey FROM graph_node WHERE pkey = ?", "rebuf_wires) ORDER BY rebuf_nodes.node_pkey;\"\"\" ) for node_pkey, rebuf_tile, rebuf_wire_name in", "wire_in_tile.pkey = wire.wire_in_tile_pkey WHERE wire.node_pkey = ?;\"\"\", (node_pkey, ) )", "canon_phy_tile_pkey FROM track WHERE pkey = ? 
)\"\"\", (track_pkey, )", "'GND', 'is_clock': False, }, ], }, } } return synth_tiles", "from prjxray_constant_site_pins import feature_when_routed from prjxray_tile_import import remove_vpr_tile_prefix import simplejson", "FROM wire_in_tile WHERE site_pin_pkey = ( SELECT pkey FROM site_pin", "ptc, capacitance, resistance) in progressbar_utils.progressbar(cur.execute(\"\"\" SELECT pkey, track_pkey, graph_node_type, x_low,", "track_node = track_nodes[option[0]] assert track_node in node_mapping, (track_node, track_pkey) if", "(node_pkey, ) ) for tile, wire_name in cur: REBUF_SOURCES[(tile, wire_name)]", "right_graph_node_pkey is not None, (tile_type, pin_name) node_mapping[right_graph_node_pkey] = node.id update_connection_box(", "= re.compile('CLK_HROW_CK_IN_[LR][0-9]+') BUFG_CLK_OUT_REGEX = re.compile('CLK_HROW_R_CK_GCLK[0-9]+') CCIO_ACTIVE_REGEX = re.compile('HCLK_CMT_CCIO[0-9]+') HCLK_OUT =", "capacitance, resistance) in progressbar_utils.progressbar(cur.execute(\"\"\" SELECT pkey, track_pkey, graph_node_type, x_low, x_high,", "= cur.fetchall() assert len(results) == 1 wire_in_tile_pkey = results[0][0] tile_pkey,", "cur = conn.cursor() cur.execute( \"\"\"SELECT name FROM switch WHERE pkey", "num_edges = len(graph.edges) print('{} Counting edges.'.format(now())) cur = conn.cursor() cur.execute(\"SELECT", "feature_path[1] == \"IOB_DIFFO_IN1\": return '{}.OUT_DIFF'.format(feature_path[0]) # IOB_PADOUT0->IOB_DIFFI_IN1 # IOB_PADOUT1->IOB_DIFFI_IN0 #", "\"\"\"SELECT src_wire_in_tile_pkey, dest_wire_in_tile_pkey FROM pip_in_tile WHERE pkey = ?;\"\"\", (pip_pkey,", "site ON site.pkey = site_as_tile.site_pkey WHERE site_as_tile.pkey = ?\"\"\", (site_as_tile_pkey,", "grid.gridinfo_at_tilename(tile) if gridinfo.tile_type not in ['CLK_HROW_BOT_R', 'CLK_HROW_TOP_R']: continue hclk_x, hclk_y", "node_dict['connection_box'] = graph2.ConnectionBox( x=grid_x, y=grid_y, id=box_id, site_pin_delay=site_pin_delay, ) graph.nodes[node_idx] =", "# The \"DIFF_OUT\" cannot be set in the architecture as", "location of BUFHCE\" y_loc = m.group(2) bufhce_loc = 'BUFHCE_X{}Y{}'.format(x_loc, y_loc)", "\"{}.CLK_HROW_R_CK_GCLK{}_ACTIVE\".format( hrow_tile, m.group(1) ) ) elif m.group(2) == 'BOT': REBUF_NODES[node_pkey].append(", "\"\"\" cur = conn.cursor() cur.execute( \"\"\" SELECT pkey, tile_type_pkey, name", "(track_pkey, ) ) result = cur2.fetchone() if result is not", "= [] m = rebuf_wire_regexp.fullmatch(rebuf_wire_name) if m.group(2) == 'TOP': REBUF_NODES[node_pkey].append(", "graph_node.pkey to rr node id. 
def import_graph_nodes(conn, graph, node_mapping, connection_box_map):
    cur = conn.cursor()

    get_tile_and_site_as_tile_pkey = create_get_tile_and_site_as_tile_pkey(cur)
    get_site_as_tile_wire = create_get_site_as_tile_wire(cur)

    for node_idx, node in enumerate(graph.nodes):
        if node.type not in (graph2.NodeType.IPIN, graph2.NodeType.OPIN):
            continue

        gridloc = graph.loc_map[(node.loc.x_low, node.loc.y_low)]
        pin_name = graph.pin_ptc_to_name_map[
            (gridloc.block_type_id, node.loc.ptc)]

        # Synthetic blocks are handled below.
        if pin_name.startswith('SYN-'):
            set_connection_box(
                graph,
                node_idx,
                node.loc.x_low,
                node.loc.y_low,
                box_id=graph.maybe_add_connection_box('IMUX'),
                site_pin_delay=0.,
            )
            continue

        m = PIN_NAME_TO_PARTS.match(pin_name)
        assert m is not None, pin_name

        tile_type = remove_vpr_tile_prefix(m.group(1))
        pin = m.group(2)

        tile_pkey, site_as_tile_pkey = get_tile_and_site_as_tile_pkey(
            node.loc.x_low, node.loc.y_low
        )

        if site_as_tile_pkey is not None:
            wire_in_tile_pkey = get_site_as_tile_wire(site_as_tile_pkey, pin)
        else:
            cur.execute(
                """
SELECT pkey FROM wire_in_tile
WHERE
  name = ?
  AND phy_tile_type_pkey IN (
    SELECT tile_type_pkey FROM phy_tile WHERE pkey IN (
      SELECT phy_tile_pkey FROM tile_map WHERE tile_pkey = ?
    )
  );""", (pin, tile_pkey)
            )
            results = cur.fetchall()
            assert len(results) == 1
            wire_in_tile_pkey = results[0][0]

        cur.execute(
            """
SELECT
  top_graph_node_pkey, bottom_graph_node_pkey,
  left_graph_node_pkey, right_graph_node_pkey
FROM wire
WHERE
  wire_in_tile_pkey = ?
  AND tile_pkey = ?;""", (wire_in_tile_pkey, tile_pkey)
        )

        result = cur.fetchone()
        assert result is not None, (wire_in_tile_pkey, tile_pkey)
        (
            top_graph_node_pkey, bottom_graph_node_pkey, left_graph_node_pkey,
            right_graph_node_pkey
        ) = result

        side = node.loc.side
        if side == tracks.Direction.LEFT:
            assert left_graph_node_pkey is not None, (tile_type, pin_name)
            node_mapping[left_graph_node_pkey] = node.id

            update_connection_box(
                conn, graph, left_graph_node_pkey, node_idx,
                connection_box_map
            )
        elif side == tracks.Direction.RIGHT:
            assert right_graph_node_pkey is not None, (tile_type, pin_name)
            node_mapping[right_graph_node_pkey] = node.id

            update_connection_box(
                conn, graph, right_graph_node_pkey, node_idx,
                connection_box_map
            )
        elif side == tracks.Direction.TOP:
            assert top_graph_node_pkey is not None, (tile_type, pin_name)
            node_mapping[top_graph_node_pkey] = node.id

            update_connection_box(
                conn, graph, top_graph_node_pkey, node_idx, connection_box_map
            )
        elif side == tracks.Direction.BOTTOM:
            assert bottom_graph_node_pkey is not None, (tile_type, pin_name)
            node_mapping[bottom_graph_node_pkey] = node.id

            update_connection_box(
                conn, graph, bottom_graph_node_pkey, node_idx,
                connection_box_map
            )
        else:
            assert False, side
cur = conn.cursor() cur.execute(\"SELECT count() FROM", "IN ( SELECT DISTINCT wire_in_tile_pkey FROM wire WHERE pkey IN", "1, site_as_tile_pkey return results[0] @functools.lru_cache(maxsize=0) def get_site_as_tile_wire(site_as_tile_pkey, pin): site_type_pkey, site_pkey", "roi = Roi( db=db, x1=x_min, y1=y_min, x2=x_max, y2=y_max, ) else:", "graph2.NodeType.CHANY: direction = 'Y' y_low = max(y_low, 1) else: assert", "Import alive tracks'.format(now())) alive_tracks = set() for (track_pkey, ) in", "Creating tracks'.format(now())) segment_id = graph.get_segment_id_from_name('dummy') create_track_rr_graph( conn, graph, node_mapping, use_roi,", "re.compile('BRAM_CASCOUT_ADDR((?:BWR)|(?:ARD))ADDRU([0-9]+)') CONNECTION_BOX_FILTER = re.compile('([^0-9]+)[0-9]*') BUFG_CLK_IN_REGEX = re.compile('CLK_HROW_CK_IN_[LR][0-9]+') BUFG_CLK_OUT_REGEX = re.compile('CLK_HROW_R_CK_GCLK[0-9]+')", "= conn.cursor() for (graph_node_pkey, track_pkey, graph_node_type, x_low, x_high, y_low, y_high,", "switch_pkey, phy_tile_pkey, pip_pkey, backward) in enumerate(cur.execute(\"\"\" SELECT src_graph_node_pkey, dest_graph_node_pkey, switch_pkey,", "y_list;') y_list = [] for idx, info in cur: y_list.append(graph2.ChannelList(idx,", ") ) (track_pkey, ) = cur.fetchone() assert track_pkey is not", "TILE. # # The following, is to change the edge", "WHERE connection_box_wire_pkey IS NOT NULL ) );\"\"\" ) connection_box_map =", ") ) def reduce_connection_box(box): \"\"\" Reduce the number of connection", "IS NOT NULL;\"\"\")): if track_pkey not in alive_tracks: continue cur2.execute(", "of the list to simplify the code below. rebuf_and_hrow_tiles =", "else: assert False, pin track_node = track_nodes[option[0]] assert track_node in", "block_types['SYN-VCC'] gnd_loc = None vcc_loc = None for grid_loc in", "re.compile('HCLK_CK_BUFHCLK[0-9]+') CLK_HROW_CK_MUX_REGEX = re.compile('CLK_HROW_CK_MUX_OUT_([LR])([0-9]+)') CASCOUT_REGEX = re.compile('BRAM_CASCOUT_ADDR((?:BWR)|(?:ARD))ADDRU([0-9]+)') CONNECTION_BOX_FILTER = re.compile('([^0-9]+)[0-9]*')", "result = cur2.fetchone() if result is not None: segment_name =", "yield (src_node, sink_node, switch_id, ()) if idx % 1024 ==", "both ends of the list to simplify the code below.", "if m: x_loc_str = m.group(1) if 'L' in x_loc_str: x_loc", "set_connection_box( graph, node_idx, node.loc.x_low, node.loc.y_low, box_id=graph.maybe_add_connection_box('IMUX'), site_pin_delay=0., ) continue m", "with progressbar_utils.ProgressBar(max_value=len(nodes)) as bar: for idx, node in enumerate(nodes): yield", "coord not in point_map: continue for old_id in point_map[coord]: id_map[old_id]", "'.join((feature, enable_feature)) if CCIO_ACTIVE_REGEX.fullmatch(feature_path[-1]): features = [feature] features.append( '{}.{}_ACTIVE'.format(feature_path[0], feature_path[-1])", "phy_tile WHERE pkey = ( SELECT canon_phy_tile_pkey FROM track WHERE", "Vivado does not report this connection as a PIP but", "node_mapping: continue if dest_graph_node not in node_mapping: continue if pip_pkey", "grid_y, box_id, site_pin_delay ) def create_get_tile_and_site_as_tile_pkey(cur): tiles = {} for", "{}.{}'.format(feature, parts[0], wire_feature) return feature # CLBLL_L.CLBLL_LL_A1[0] -> (CLBLL_L, CLBLL_LL_A1)", "'{}.{}_ACTIVE'.format( feature_path[0], feature_path[-1] ) return ' '.join((feature, enable_feature)) if CCIO_ACTIVE_REGEX.fullmatch(feature_path[-1]):", "WHERE pkey = ( SELECT phy_tile_pkey FROM wire WHERE pkey", "grid_y FROM tile;\"\"\"): tiles[(grid_x, grid_y)] = (tile_pkey, site_as_tile_pkey) def 
def create_track_rr_graph(
        conn, graph, node_mapping, use_roi, roi, synth_tiles, segment_id
):
    cur = conn.cursor()
    cur.execute("""SELECT count(*) FROM track;""")
    (num_channels, ) = cur.fetchone()

    print('{} Import alive tracks'.format(now()))
    alive_tracks = set()
    for (track_pkey,
         ) in cur.execute("SELECT pkey FROM track WHERE alive = 1;"):
        alive_tracks.add(track_pkey)

    print('{} Importing alive tracks'.format(now()))
    import_tracks(conn, alive_tracks, node_mapping, graph, segment_id)

    print('original {} final {}'.format(num_channels, len(alive_tracks)))
def add_synthetic_edges(conn, graph, node_mapping, grid, synth_tiles):
    """ Add synthetic edges for the constant (VCC/GND) network.

    In ROI configurations, this also connects the synthetic IO tiles to the
    routing node specified.
    """
    cur = conn.cursor()
    delayless_switch = graph.get_switch_id('__vpr_delayless_switch__')

    for tile_name, synth_tile in synth_tiles['tiles'].items():
        num_inpad = len(
            list(
                filter(
                    lambda t: t['port_type'] == 'input', synth_tile['pins']
                )
            )
        )
        num_outpad = len(
            list(
                filter(
                    lambda t: t['port_type'] == 'output', synth_tile['pins']
                )
            )
        )
        for pin in synth_tile['pins']:
            if pin['port_type'] in ['input', 'output']:
                wire_pkey = get_wire_pkey(conn, tile_name, pin['wire'])
                cur.execute(
                    """
SELECT track_pkey FROM node WHERE pkey = (
  SELECT node_pkey FROM wire WHERE pkey = ?
);""", (wire_pkey, )
                )
                (track_pkey, ) = cur.fetchone()
                assert track_pkey is not None, (
                    tile_name, pin['wire'], wire_pkey
                )
            elif pin['port_type'] == 'VCC':
                cur.execute('SELECT vcc_track_pkey FROM constant_sources')
                (track_pkey, ) = cur.fetchone()
            elif pin['port_type'] == 'GND':
                cur.execute('SELECT gnd_track_pkey FROM constant_sources')
                (track_pkey, ) = cur.fetchone()
            else:
                assert False, pin['port_type']

            tracks_model, track_nodes = get_track_model(conn, track_pkey)

            option = list(
                tracks_model.get_tracks_for_wire_at_coord(
                    tuple(synth_tile['loc'])
                ).values()
            )
            assert len(option) > 0, (pin, len(option))

            if pin['port_type'] == 'input':
                tile_type = synth_tile['tile_name']
                wire = 'outpad'
            elif pin['port_type'] == 'output':
                tile_type = synth_tile['tile_name']
                wire = 'inpad'
            elif pin['port_type'] == 'VCC':
                tile_type = 'SYN-VCC'
                wire = 'VCC'
            elif pin['port_type'] == 'GND':
                tile_type = 'SYN-GND'
                wire = 'GND'
            else:
                assert False, pin

            track_node = track_nodes[option[0]]
            assert track_node in node_mapping, (track_node, track_pkey)

            if wire == 'inpad' and num_inpad > 1:
                pin_name = graph.create_pin_name_from_tile_type_sub_tile_num_and_pin(
                    tile_type, (pin['z_loc'] - num_inpad), wire
                )
            elif wire == 'outpad' and num_outpad > 1:
                pin_name = graph.create_pin_name_from_tile_type_sub_tile_num_and_pin(
                    tile_type, pin['z_loc'], wire
                )
            else:
                pin_name = graph.create_pin_name_from_tile_type_and_pin(
                    tile_type, wire
                )

            pin_node = graph.get_nodes_for_pin(
                tuple(synth_tile['loc']), pin_name
            )

            if pin['port_type'] == 'input':
                graph.add_edge(
                    src_node=node_mapping[track_node],
                    sink_node=pin_node[0][0],
                    switch_id=delayless_switch,
                    name='synth_{}_{}'.format(tile_name, pin['wire']),
                )
            elif pin['port_type'] in ['VCC', 'GND', 'output']:
                graph.add_edge(
                    src_node=pin_node[0][0],
                    sink_node=node_mapping[track_node],
                    switch_id=delayless_switch,
                    name='synth_{}_{}'.format(tile_name, pin['wire']),
                )
            else:
                assert False, pin
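# For reference, a sketch of the synth_tiles shape consumed above.  It
# matches the dict built by find_constant_network() below; ROI synth_tiles
# produced by prjxray-arch-import additionally carry 'pad' and 'z_loc'
# keys per pin (the coordinates here are placeholders):
#
#   synth_tiles = {
#       'tiles': {
#           'VCC': {
#               'loc': (x, y),
#               'pins': [{'wire': 'VCC', 'pad': 'VCC',
#                         'port_type': 'VCC', 'is_clock': False}],
#           },
#           # ... one entry per synthetic tile ...
#       }
#   }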
cur.execute( \"\"\" WITH rebuf_wires(wire_in_tile_pkey) AS ( SELECT pkey", "IOI_SING within the clock region column is_bottom_sing = int(m.group(2)) %", "as f: synth_tiles = json.load(f) roi = Roi( db=db, x1=synth_tiles['info']['GRID_X_MIN'],", "switch.intrinsic_delay FROM switch WHERE pkey = ( SELECT site_pin_switch_pkey FROM", "= rebuf_to_hrow_map[rebuf_tile][\"above\"] if hrow_tile is not None: REBUF_NODES[node_pkey].append( \"{}.CLK_HROW_R_CK_GCLK{}_ACTIVE\".format( hrow_tile,", "elif side == tracks.Direction.TOP: assert top_graph_node_pkey is not None, (tile_type,", "src_wire.replace('_0', '_{}'.format(dst_value)) changed_feature = \"{}{}\".format(dst_value, src_wire) feature = \"{}{}\".format(unchanged_feature, changed_feature)", "return '{} {}.{}'.format(feature, parts[0], wire_feature) return feature # CLBLL_L.CLBLL_LL_A1[0] ->", "2 p = math.ceil(math.log2(max(channels_obj.x_max, channels_obj.y_max))) point_map = {} for node", "The Y coordinate identified with the # second capture group", "for (track_pkey, ) in cur.execute(\"SELECT pkey FROM track WHERE alive", "wire_name in cur: connection_box_map[wire_in_tile_pkey] = graph.maybe_add_connection_box( reduce_connection_box(wire_name) ) return connection_box_map", "src_node, sink_node, switch_id, (('fasm_features', feature), ) ) else: yield (src_node,", "def get_number_graph_edges(conn, graph, node_mapping): num_edges = len(graph.edges) print('{} Counting edges.'.format(now()))", "tile is not None and tile.startswith(\"CLK_HROW\"): return tile return None", "), canonical_loc=canonical_loc ) def create_track_rr_graph( conn, graph, node_mapping, use_roi, roi,", "'VCC', 'pad': 'VCC', 'port_type': 'VCC', 'is_clock': False, }, ], },", "enable_bufhce_in_use, enable_bufhce_zinv_ce)) if BUFG_CLK_IN_REGEX.fullmatch(feature_path[-1]): enable_feature = '{}.{}_ACTIVE'.format( feature_path[0], feature_path[-1] )", "continue gridloc = graph.loc_map[(node.loc.x_low, node.loc.y_low)] pin_name = graph.pin_ptc_to_name_map[ (gridloc.block_type_id, node.loc.ptc)]", "graph_node_type, x_low, x_high, y_low, y_high, ptc, capacitance, resistance FROM graph_node", "of (src, sink, switch_id) tuples that pip edges have been", "through, without the need of placing them, therefore, # when", "connection_box_map ) elif side == tracks.Direction.RIGHT: assert right_graph_node_pkey is not", "import HilbertCurve import math import prjxray.db from prjxray.roi import Roi", ".ENABLE_BUFFER.HCLK_CK_BUFHCLK10 \"\"\" # IOI_SING tiles have bits in common with", "vcc_loc, 'pins': [ { 'wire': 'VCC', 'pad': 'VCC', 'port_type': 'VCC',", "in enumerate(nodes): yield node if idx % 1024 == 0:", "not None and tile.startswith(\"CLK_HROW\"): return tile return None # Assign", "REBUF_NODES: cur.execute( \"\"\" SELECT phy_tile.name, wire_in_tile.name FROM wire INNER JOIN", "True x_min, y_min, x_max, y_max = map(int, args.graph_limit.split(',')) roi =", "to their Y coordinates. \"\"\" cur.execute( \"\"\" SELECT name FROM", "box = CONNECTION_BOX_FILTER.match(box).group(1) if 'BRAM_ADDR' in box: box = 'IMUX'", "prjxray db but there # is a tile-wide feature named", "determine X location of BUFHCE\" y_loc = m.group(2) bufhce_loc =", "elif pin['port_type'] == 'VCC': tile_type = 'SYN-VCC' wire = 'VCC'", "connection box of IPIN node if needed. 
\"\"\" cur =", "roi = Roi( db=db, x1=synth_tiles['info']['GRID_X_MIN'], y1=synth_tiles['info']['GRID_Y_MIN'], x2=synth_tiles['info']['GRID_X_MAX'], y2=synth_tiles['info']['GRID_Y_MAX'], ) print('{}", "JOIN wire_in_tile ON wire_in_tile.pkey = wire.wire_in_tile_pkey INNER JOIN phy_tile ON", "simplejson as json from lib import progressbar_utils import datetime import", "roi, synth_tiles, segment_id ) # Set of (src, sink, switch_id)", "alive tracks'.format(now())) import_tracks(conn, alive_tracks, node_mapping, graph, segment_id) print('original {} final", "pickle import sqlite3 now = datetime.datetime.now HCLK_CK_BUFHCLK_REGEX = re.compile('HCLK_CK_BUFHCLK[0-9]+') CLK_HROW_CK_MUX_REGEX", "= graph.create_pin_name_from_tile_type_and_pin( tile_type, wire ) pin_node = graph.get_nodes_for_pin( tuple(synth_tile['loc']), pin_name", "if result: canonical_loc = graph2.CanonicalLoc(x=result[0], y=result[1]) track = tracks.Track( direction=direction,", "= '{}.{}_ACTIVE'.format( feature_path[0], feature_path[-1] ) return ' '.join((feature, enable_feature)) if", "= True x_min, y_min, x_max, y_max = map(int, args.graph_limit.split(',')) roi", "# Walk all track graph nodes and add them. print('{}", "'IMUX' if box.endswith('_L'): box = box.replace('_L', '') return box REBUF_NODES", "hclk_cmt_x -= 1 gridinfo = grid.gridinfo_at_loc((hclk_cmt_x, hclk_cmt_y)) if gridinfo.tile_type ==", "architecture as it is defined one # level up in", "point_map[(x, y)] = [] point_map[(x, y)].append(node.id) hilbert_curve = HilbertCurve(p, N)", "0: bar.update(idx) def phy_grid_dims(conn): \"\"\" Returns physical grid dimensions. \"\"\"", "conn.cursor() cur.execute(\"\"\"SELECT count(*) FROM track;\"\"\") (num_channels, ) = cur.fetchone() print('{}", "lr)] return ['{}.HCLK_CMT_CK_BUFHCLK{}_USED'.format(hclk_cmt_tile, hclk_number)] def check_feature(feature): \"\"\" Check if enabling", "assert vcc_loc is None vcc_loc = (grid_loc.x, grid_loc.y) assert gnd_loc", "= ? ) AND site_pkey = ? ;\"\"\", (site_type_pkey, pin,", "result = cur2.fetchone() if result: canonical_loc = graph2.CanonicalLoc(x=result[0], y=result[1]) track", "not site-wide). So here we # map the PIP's feature", "switch_id=delayless_switch, name='synth_{}_{}'.format(tile_name, pin['wire']), ) else: assert False, pin def get_switch_name(conn,", "not None, ( tile_name, pin['wire'], wire_pkey ) elif pin['port_type'] ==", "= 'inpad' elif pin['port_type'] == 'VCC': tile_type = 'SYN-VCC' wire", "search the input rr graph for the SYN-GND and SYN-VCC", "we simply reject fasm # features here. if feature_path[2] ==", "' '.join((feature, enable_bufhce_in_use, enable_bufhce_zinv_ce)) if BUFG_CLK_IN_REGEX.fullmatch(feature_path[-1]): enable_feature = '{}.{}_ACTIVE'.format( feature_path[0],", "AS ( SELECT DISTINCT node_pkey FROM wire WHERE wire_in_tile_pkey IN", "bufhce_loc ) return ' '.join((feature, enable_bufhce_in_use, enable_bufhce_zinv_ce)) if BUFG_CLK_IN_REGEX.fullmatch(feature_path[-1]): enable_feature", "SELECT src_graph_node_pkey, dest_graph_node_pkey FROM graph_edge; \"\"\"): if src_graph_node not in", "graph2.ConnectionBox( x=grid_x, y=grid_y, id=box_id, site_pin_delay=site_pin_delay, ) graph.nodes[node_idx] = graph2.Node(**node_dict) def", "for tile_name, synth_tile in synth_tiles['tiles'].items(): num_inpad = len( list( filter(", "'SYN-GND' in block_types assert 'SYN-VCC' in block_types gnd_block_id = block_types['SYN-GND']", "the SYN-GND and SYN-VCC tiles. 
\"\"\" block_types = {} for", "x_list;') x_list = [] for idx, info in cur: x_list.append(graph2.ChannelList(idx,", "the IOI_SITE_PIPS names # in the destination wire of the", "report this connection as a PIP but in the prjxray", "pkey = ? )\"\"\", (connection_box_wire_pkey, ) ) grid_x, grid_y =", "graph_node WHERE track_pkey IS NOT NULL;\"\"\")): if track_pkey not in", ") in cur.execute(\"SELECT pkey FROM track WHERE alive = 1;\"):", "switch_name_map[switch_pkey] = switch_id else: switch_id = switch_name_map[switch_pkey] return switch_id def", "# - ZINV_CE: to disable the inverter on CE input", "# The relevant features are: # - IN_USE: to enable", "\"\"\" Find VCC and GND tiles and create synth_tiles input.", "p = math.ceil(math.log2(max(channels_obj.x_max, channels_obj.y_max))) point_map = {} for node in", "# Vivado does not report this connection as a PIP", "pin_name.startswith('SYN-'): set_connection_box( graph, node_idx, node.loc.x_low, node.loc.y_low, box_id=graph.maybe_add_connection_box('IMUX'), site_pin_delay=0., ) continue", "not None if switch_pkey not in switch_name_map: cur = conn.cursor()", "already be present from the input rr_graph. Create a mapping", "(tile_pkey, site_as_tile_pkey) def get_tile_and_site_as_tile_pkey(x, y): return tiles[(x, y)] return get_tile_and_site_as_tile_pkey", "pickle.dump(node_mapping, f) print('{} Done writing node map.'.format(now())) if __name__ ==", "None: REBUF_NODES[node_pkey].append( \"{}.CLK_HROW_R_CK_GCLK{}_ACTIVE\".format( hrow_tile, m.group(1) ) ) else: assert False,", "canonical_loc = None cur2.execute( \"\"\" SELECT grid_x, grid_y FROM phy_tile", "connection_box_wire_pkey = cur.fetchone()[0] if connection_box_wire_pkey is not None: cur.execute( \"\"\"", "Example: IOI_OLOGIC0 -> IOI_OLOGIC1 dst_value = '0' if is_bottom_sing else", "= cur.fetchone() return (src_net, dest_net) return get_pip_wire_names def get_number_graph_edges(conn, graph,", "AS ( SELECT pkey FROM wire_in_tile WHERE name LIKE \"CLK_BUFG_REBUF_R_CK_GCLK%_BOT\"", "the input rr graph for the SYN-GND and SYN-VCC tiles.", "Walk all track graph nodes and add them. print('{} Creating", "y_high, ptc, capacitance, resistance FROM graph_node WHERE track_pkey IS NOT", "database. cur = conn.cursor() cur.execute(\"SELECT count() FROM graph_edge;\" \"\") (num_edges,", "\"{}.CLK_HROW_R_CK_GCLK{}_ACTIVE\".format( hrow_tile, m.group(1) ) ) else: assert False, (rebuf_tile, rebuf_wire_name)", "False roi = None synth_tiles = None capnp_graph = capnp_graph2.Graph(", "of fabric connectivity', required=True ) parser.add_argument( '--synth_tiles', help='If using an", "bottom_graph_node_pkey is not None, (tile_type, pin_name) node_mapping[bottom_graph_node_pkey] = node.id update_connection_box(", "x: id_map[x] def main(): parser = argparse.ArgumentParser() parser.add_argument( '--db_root', required=True,", "FROM wire WHERE pkey = ( SELECT site_wire_pkey FROM node", "cur.execute(\"\"\"SELECT count(*) FROM track;\"\"\") (num_channels, ) = cur.fetchone() print('{} Import", "connections are hard wires that connect IOB33M and IOB33S sites.", "region spans a total of 50 IOBs. # The IOI_SING", "grid_x, grid_y FROM phy_tile WHERE pkey = ( SELECT canon_phy_tile_pkey", ") (track_pkey, ) = cur.fetchone() assert track_pkey is not None,", "pip_name is not None: feature = check_feature(pip_name) if feature: yield", "connection_box_wire_pkey IS NOT NULL ) );\"\"\" ) connection_box_map = {}", "\"\"\" Update connection box of IPIN node if needed. 
\"\"\"", "cur.fetchone() else: assert False, pin['port_type'] tracks_model, track_nodes = get_track_model(conn, track_pkey)", "reduce_connection_box(box): \"\"\" Reduce the number of connection boxes by merging", ") )\"\"\", (graph_node_pkey, ) ) site_pin_delay = cur.fetchone()[0] set_connection_box( graph,", "sink, switch_id) tuples that pip edges have been sent to", "= re.compile('CLK_HROW_CK_HCLK_OUT_([LR])([0-9]+)') IOI_OCLK = re.compile('IOI_OCLK_([01])') # Regex for [LR]IOI_SING tiles", "IOI/IOB column. The Y coordinate identified with the # second", "enable_buffer_feature)) # BUFHCE sites are now routed through, without the", "file' ) parser.add_argument( '--write_rr_graph', required=True, help='Output rr_graph file' ) parser.add_argument(", "is not None, (tile_type, pin_name) node_mapping[right_graph_node_pkey] = node.id update_connection_box( conn,", ") graph.nodes[node_idx] = graph2.Node(**node_dict) def update_connection_box( conn, graph, graph_node_pkey, node_idx,", "= cur.fetchone() assert track_pkey is not None, ( tile_name, pin['wire'],", "= m.group(2) tile_pkey, site_as_tile_pkey = get_tile_and_site_as_tile_pkey( node.loc.x_low, node.loc.y_low ) if", "SELECT canon_phy_tile_pkey FROM track WHERE pkey = ? )\"\"\", (track_pkey,", "tracks'.format(now())) alive_tracks = set() for (track_pkey, ) in cur.execute(\"SELECT pkey", "grid_y FROM phy_tile WHERE pkey = ( SELECT canon_phy_tile_pkey FROM", "pin in synth_tile['pins']: if pin['port_type'] in ['input', 'output']: wire_pkey =", "(tile_pkey, ) ) return cur.fetchone()[0] return get_tile_name def create_get_pip_wire_names(conn): cur", "= '{}.{}.{}'.format(tile_name, src_net, dest_net) else: pip_name = None switch_id =", "IN (SELECT wire_in_tile_pkey FROM rebuf_wires) ORDER BY rebuf_nodes.node_pkey;\"\"\" ) for", "but there # is a tile-wide feature named \"DIFF_OUT\". #", "graph.get_switch_id(name) continue except KeyError: capnp_graph.add_switch( graph2.Switch( id=None, name=name, type=graph2.SwitchType[switch_type.upper()], timing=graph2.SwitchTiming(", "y)] = [] point_map[(x, y)].append(node.id) hilbert_curve = HilbertCurve(p, N) idx", "filter( lambda t: t['port_type'] == 'input', synth_tile['pins'] ) ) )", "False, }, ], }, \"GND\": { 'loc': gnd_loc, 'pins': [", "and CHANY from the database. IPIN and OPIN rr_nodes should", "the BUFHCE site # - ZINV_CE: to disable the inverter", "? 
)\"\"\", (connection_box_wire_pkey, ) ) grid_x, grid_y = cur.fetchone() cur.execute(", "= prjxray.db.Database(args.db_root, args.part) populate_hclk_cmt_tiles(db) synth_tiles = None if args.synth_tiles: use_roi", "node_mapping: continue if pip_pkey is not None: tile_name = get_tile_name(phy_tile_pkey)", "return ' '.join([feature] + REBUF_NODES[REBUF_SOURCES[rebuf_key]]) m = IOI_OCLK.fullmatch(feature_path[1]) if m:", "IOI_SING # shares bits with the top half of a", "- num_inpad), wire ) else: pin_name = graph.create_pin_name_from_tile_type_and_pin( tile_type, wire", "m.group(2), m.group(3), m.group(4) ) src_wire = m.group(6).replace('_SING', '') for pip", "synth_tiles): cur = conn.cursor() delayless_switch = graph.get_switch_id('__vpr_delayless_switch__') for tile_name, synth_tile", "def import_graph_nodes(conn, graph, node_mapping, connection_box_map): cur = conn.cursor() get_tile_and_site_as_tile_pkey =", "with open(args.write_rr_node_map, 'wb') as f: pickle.dump(node_mapping, f) print('{} Done writing", "is not None, (tile_type, pin_name) node_mapping[left_graph_node_pkey] = node.id update_connection_box( conn,", "SELECT src_graph_node_pkey, dest_graph_node_pkey, switch_pkey, phy_tile_pkey, pip_in_tile_pkey, backward FROM graph_edge; \"\"\")):", "switch_pkey, phy_tile_pkey, pip_in_tile_pkey, backward FROM graph_edge; \"\"\")): if src_graph_node not", "= wire.wire_in_tile_pkey WHERE wire.node_pkey = ?;\"\"\", (node_pkey, ) ) for", "a pip. Instead of making it a pseudo-pip we simply", "(its tile-wide, not site-wide). So here we # map the", ") ) else: assert False, (rebuf_tile, rebuf_wire_name) for node_pkey in", "fabric connectivity', required=True ) parser.add_argument( '--synth_tiles', help='If using an ROI,", "gnd_loc is None gnd_loc = (grid_loc.x, grid_loc.y) if vcc_block_id ==", "a name of CLK_HROW tile only if its there on", "WHERE pkey = ?; \"\"\", (tile_pkey, ) ) return cur.fetchone()[0]", "= graph.get_segment_id_from_name(segment_name) else: segment_id = default_segment_id node_type = graph2.NodeType(graph_node_type) if", "and GND tiles and create synth_tiles input. All arches should", "' '.join( [feature] + find_hclk_cmt_hclk_feature( feature_path[0], m.group(1), m.group(2) ) )", "REBUF_SOURCES: return ' '.join([feature] + REBUF_NODES[REBUF_SOURCES[rebuf_key]]) m = IOI_OCLK.fullmatch(feature_path[1]) if", "emitted in rrgraph XML. # # TODO: This can be", "the list to simplify the code below. rebuf_and_hrow_tiles = [None]", "1) elif node_type == graph2.NodeType.CHANY: direction = 'Y' y_low =", "tiles. # # The difference is that the TOP IOI_SING", "\"\"\" WITH rebuf_wires(wire_in_tile_pkey) AS ( SELECT pkey FROM wire_in_tile WHERE", "WHERE pkey = ?\"\"\", (graph_node_pkey, ) ) connection_box_wire_pkey = cur.fetchone()[0]", "a mapping between database graph_nodes and IPIN, OPIN, CHANX and", "'CLK_BUFG_REBUF_R_CK_GCLK([0-9]+)_(BOT|TOP)' ) cur = conn.cursor() # Find CLK_HROW_TOP_R, CLK_HROW_TOP_R and", "m: enable_cascout = '{}.CASCOUT_{}_ACTIVE'.format( feature_path[0], m.group(1) ) return ' '.join((feature,", "needs to be added. 
def find_constant_network(graph):
    """ Find VCC and GND tiles and create synth_tiles input.

    All arches should have these synthetic tiles; search the input rr graph
    for the SYN-GND and SYN-VCC tiles.

    """
    block_types = {}

    for block_type in graph.block_types:
        block_types[block_type.name] = block_type.id

    assert 'SYN-GND' in block_types
    assert 'SYN-VCC' in block_types

    gnd_block_id = block_types['SYN-GND']
    vcc_block_id = block_types['SYN-VCC']

    gnd_loc = None
    vcc_loc = None

    for grid_loc in graph.grid:
        if gnd_block_id == grid_loc.block_type_id:
            assert gnd_loc is None
            gnd_loc = (grid_loc.x, grid_loc.y)

        if vcc_block_id == grid_loc.block_type_id:
            assert vcc_loc is None
            vcc_loc = (grid_loc.x, grid_loc.y)

    assert gnd_loc is not None
    assert vcc_loc is not None

    synth_tiles = {
        'tiles': {
            "VCC": {
                'loc': vcc_loc,
                'pins': [
                    {
                        'wire': 'VCC',
                        'pad': 'VCC',
                        'port_type': 'VCC',
                        'is_clock': False,
                    },
                ],
            },
            "GND": {
                'loc': gnd_loc,
                'pins': [
                    {
                        'wire': 'GND',
                        'pad': 'GND',
                        'port_type': 'GND',
                        'is_clock': False,
                    },
                ],
            },
        }
    }

    return synth_tiles
\"\"\" import argparse import os.path from", "None, (wire_in_tile_pkey, tile_pkey) ( top_graph_node_pkey, bottom_graph_node_pkey, left_graph_node_pkey, right_graph_node_pkey ) =", "y_dim=y_dim ) num_edges = get_number_graph_edges(conn, graph, node_mapping) print('{} Serializing to", "'BYP']: if pip in src_wire: src_wire = src_wire.replace('_0', '_{}'.format(src_value)) if", "synth_tiles = find_constant_network(graph) with sqlite3.connect(\"file:{}?mode=ro\".format(args.connection_database), uri=True) as conn: populate_bufg_rebuf_map(conn) cur", "cannot be set in the architecture as it is defined", "sink_node, switch_id, (('fasm_features', feature), ) ) else: yield (src_node, sink_node,", "'.join((feature, enable_oclkm_feature)) if HCLK_CK_BUFHCLK_REGEX.fullmatch(feature_path[-1]): enable_buffer_feature = '{}.ENABLE_BUFFER.{}'.format( feature_path[0], feature_path[-1] )", "switch WHERE pkey = ?;\"\"\", (switch_pkey, ) ) (switch_name, )", "?;\"\"\", (pip_pkey, ) ) src_wire_in_tile_pkey, dest_wire_in_tile_pkey = cur.fetchone() cur.execute( \"\"\"SELECT", "count(*) FROM track;\"\"\") (num_channels, ) = cur.fetchone() print('{} Import alive", "grid from lib.rr_graph import graph2 from lib.rr_graph import tracks from", "with progressbar_utils.ProgressBar(max_value=num_edges) as bar: for idx, (src_graph_node, dest_graph_node, switch_pkey, phy_tile_pkey,", "pin['wire']) cur.execute( \"\"\" SELECT track_pkey FROM node WHERE pkey =", "while hclk_cmt_x < x_max: hclk_cmt_x += 1 gridinfo = grid.gridinfo_at_loc((hclk_cmt_x,", "def find_hclk_cmt_hclk_feature(hclk_tile, lr, hclk_number): if (hclk_tile, lr) not in HCLK_CMT_TILES:", "+ REBUF_NODES[REBUF_SOURCES[rebuf_key]]) m = IOI_OCLK.fullmatch(feature_path[1]) if m: enable_oclkm_feature = '{}.IOI_OCLKM_{}.{}'.format(", "the list. \"\"\" tile = rebuf_and_hrow_tiles[i] if tile is not", "'output', synth_tile['pins'] ) ) ) num_outpad = len( list( filter(", "a PIP but in the prjxray db it # is", "in grid.tiles(): gridinfo = grid.gridinfo_at_tilename(tile) if gridinfo.tile_type not in ['CLK_HROW_BOT_R',", "site_as_tile_pkey return results[0] @functools.lru_cache(maxsize=0) def get_site_as_tile_wire(site_as_tile_pkey, pin): site_type_pkey, site_pkey =", "== 'output', synth_tile['pins'] ) ) ) num_outpad = len( list(", "= json.load(f) roi = Roi( db=db, x1=synth_tiles['info']['GRID_X_MIN'], y1=synth_tiles['info']['GRID_Y_MIN'], x2=synth_tiles['info']['GRID_X_MAX'], y2=synth_tiles['info']['GRID_Y_MAX'],", "The following, is to change the edge feature to accomodate", "'VCC': cur.execute('SELECT vcc_track_pkey FROM constant_sources') (track_pkey, ) = cur.fetchone() elif", "'BUFHCE_X{}Y{}'.format(x_loc, y_loc) enable_bufhce_in_use = '{}.BUFHCE.{}.IN_USE'.format( feature_path[0], bufhce_loc ) enable_bufhce_zinv_ce =", "is not None, ( tile_name, pin['wire'], wire_pkey ) elif pin['port_type']", "that in # VPR coords terms. \"above\" and \"below\" mean", "bottom_graph_node_pkey, left_graph_node_pkey, right_graph_node_pkey ) = result side = node.loc.side if", "= node.loc.side if side == tracks.Direction.LEFT: assert left_graph_node_pkey is not", "get_wire_pkey, get_track_model import lib.rr_graph_capnp.graph2 as capnp_graph2 from prjxray_constant_site_pins import feature_when_routed", "conn.cursor() for name, internal_capacitance, drive_resistance, intrinsic_delay, penalty_cost, \\ switch_type in", "nodes and add them. 
print('{} Creating tracks'.format(now())) segment_id = graph.get_segment_id_from_name('dummy')", "tracks.Direction.TOP: assert top_graph_node_pkey is not None, (tile_type, pin_name) node_mapping[top_graph_node_pkey] =", "once # https://github.com/verilog-to-routing/vtr-verilog-to-routing/issues/354 # is fixed. try: graph.get_switch_id(name) continue except", "None: tile_name = get_tile_name(phy_tile_pkey) src_net, dest_net = get_pip_wire_names(pip_pkey) if not", "node id. node_mapping = {} print('{} Creating connection box list'.format(now()))", "get_clk_hrow_and_rebuf_tiles_sorted(cur): \"\"\" Finds all CLK_HROW_TOP_R, CLK_HROW_BOT_T and REBUF tiles. returns", "name LIKE \"CLK_HROW_TOP_R_%\" OR name LIKE \"CLK_BUFG_REBUF_%\" ORDER BY grid_y", "Append None on both ends of the list to simplify", "(top or bottom) # # Example: IOI_OLOGIC0_D1.IOI_IMUX34_0 -> IOI_OLOGIC0_D1.IOI_IMUX34_1 src_value", "'{}.{}.{}'.format(tile_name, dest_net, src_net) else: pip_name = '{}.{}.{}'.format(tile_name, src_net, dest_net) else:", "(track_pkey, ) = cur.fetchone() assert track_pkey is not None, (", "bufhce_loc ) enable_bufhce_zinv_ce = '{}.BUFHCE.{}.ZINV_CE=1\\'b1'.format( feature_path[0], bufhce_loc ) return '", "with graph_node's in the connection_database. print('{} Importing graph nodes'.format(now())) import_graph_nodes(conn,", "def create_get_site_as_tile_wire(cur): @functools.lru_cache(maxsize=0) def get_site_from_site_as_tile(site_as_tile_pkey): cur.execute( \"\"\" SELECT site.site_type_pkey, site_as_tile.site_pkey", "src_net, dest_net = get_pip_wire_names(pip_pkey) if not backward: pip_name = '{}.{}.{}'.format(tile_name,", "channels.'.format(now())) channels_obj = create_channels(conn) node_remap = create_node_remap(capnp_graph.graph.nodes, channels_obj) x_dim, y_dim", "routing graph for ROI.'.format(now())) elif args.graph_limit: use_roi = True x_min,", "SELECT wire_in_tile_pkey FROM wire WHERE pkey = ( SELECT site_wire_pkey", "\"\"\" box = CONNECTION_BOX_FILTER.match(box).group(1) if 'BRAM_ADDR' in box: box =", "and below CLK_HROW tile. Note that in # VPR coords", "name FROM segment WHERE pkey = ( SELECT segment_pkey FROM", "feature = check_feature(pip_name) if feature: yield ( src_node, sink_node, switch_id,", "\"\"\" cur = conn.cursor() cur.execute( \"\"\" SELECT connection_box_wire_pkey FROM graph_node", ") ) src_wire_in_tile_pkey, dest_wire_in_tile_pkey = cur.fetchone() cur.execute( \"\"\"SELECT name FROM", "dest_graph_node_pkey FROM graph_edge; \"\"\"): if src_graph_node not in node_mapping: continue", "rr inode file' ) parser.add_argument( '--connection_database', help='Database of fabric connectivity',", "None and tile_name.startswith(\"CLK_BUFG_REBUF\"): rebuf_to_hrow_map[tile_name] = { \"above\": maybe_get_clk_hrow(i - 1),", "from lib.connection_database import get_wire_pkey, get_track_model import lib.rr_graph_capnp.graph2 as capnp_graph2 from", "now routed through, without the need of placing them, therefore,", "= len( list( filter( lambda t: t['port_type'] == 'output', synth_tile['pins']", "box types. 
\"\"\" cur = conn.cursor() cur.execute( \"\"\" SELECT pkey,", "gridinfo = grid.gridinfo_at_loc((hclk_cmt_x, hclk_cmt_y)) if gridinfo.tile_type == 'HCLK_CMT': HCLK_CMT_TILES[tile, 'L']", "node_type == graph2.NodeType.CHANY: direction = 'Y' y_low = max(y_low, 1)", "y_min, y_max = cur.fetchone() cur.execute('SELECT idx, info FROM x_list;') x_list", "Creating connection box list'.format(now())) connection_box_map = create_connection_boxes(conn, graph) # Match", "be set in the architecture as it is defined one", "it is defined one # level up in the hierarchy", "else: assert False, pin['port_type'] tracks_model, track_nodes = get_track_model(conn, track_pkey) option", "prjxray.db.Database(args.db_root, args.part) populate_hclk_cmt_tiles(db) synth_tiles = None if args.synth_tiles: use_roi =", "db=db, x1=x_min, y1=y_min, x2=x_max, y2=y_max, ) else: use_roi = False", "bits with # the bottom half of a normal IOI", "return get_tile_and_site_as_tile_pkey def create_get_site_as_tile_wire(cur): @functools.lru_cache(maxsize=0) def get_site_from_site_as_tile(site_as_tile_pkey): cur.execute( \"\"\" SELECT", "FROM wire_in_tile WHERE pkey = ( SELECT wire_in_tile_pkey FROM wire", "CLK_HROW_TOP_R, CLK_HROW_TOP_R and REBUF tiles. rebuf_and_hrow_tiles = get_clk_hrow_and_rebuf_tiles_sorted(cur) # Append", "an IOI column m = IOI_SING_REGEX.fullmatch(feature) if m: # Each", "hclk_cmt_y) ) break def find_hclk_cmt_hclk_feature(hclk_tile, lr, hclk_number): if (hclk_tile, lr)", "box_id=graph.maybe_add_connection_box('IMUX'), site_pin_delay=0., ) continue m = PIN_NAME_TO_PARTS.match(pin_name) assert m is", "y2=synth_tiles['info']['GRID_Y_MAX'], ) print('{} generating routing graph for ROI.'.format(now())) elif args.graph_limit:", "name, internal_capacitance, drive_resistance, intrinsic_delay, penalty_cost, \\ switch_type in cur.execute(\"\"\" SELECT", "None capnp_graph = capnp_graph2.Graph( rr_graph_schema_fname=os.path.join( args.vpr_capnp_schema_dir, 'rr_graph_uxsdcxx.capnp' ), input_file_name=args.read_rr_graph, progressbar=progressbar_utils.progressbar,", "\"\"\" SELECT switch.intrinsic_delay FROM switch WHERE pkey = ( SELECT", "\"DIFF_OUT\" if feature_path[2] == \"IOB_DIFFO_OUT0\" and \\ feature_path[1] == \"IOB_DIFFO_IN1\":", "names # in the destination wire of the pip #", "(switch_name, ) = cur.fetchone() switch_id = graph.get_switch_id(switch_name) switch_name_map[switch_pkey] = switch_id", "synth_tiles def create_node_remap(nodes, channels_obj): N = 2 p = math.ceil(math.log2(max(channels_obj.x_max,", "= 'BUFHCE_X{}Y{}'.format(x_loc, y_loc) enable_bufhce_in_use = '{}.BUFHCE.{}.IN_USE'.format( feature_path[0], bufhce_loc ) enable_bufhce_zinv_ce", ") = cur.fetchone() switch_id = graph.get_switch_id(switch_name) switch_name_map[switch_pkey] = switch_id else:", "assert 'SYN-VCC' in block_types gnd_block_id = block_types['SYN-GND'] vcc_block_id = block_types['SYN-VCC']", "were not emitted in rrgraph XML. # # TODO: This", "create_get_tile_name(conn): cur = conn.cursor() @functools.lru_cache(maxsize=None) def get_tile_name(tile_pkey): cur.execute( \"\"\" SELECT", "and REBUF tiles. 
rebuf_and_hrow_tiles = get_clk_hrow_and_rebuf_tiles_sorted(cur) # Append None on", ") elif pin['port_type'] in ['VCC', 'GND', 'output']: graph.add_edge( src_node=pin_node[0][0], sink_node=node_mapping[track_node],", "y_min, x_max, y_max = map(int, args.graph_limit.split(',')) roi = Roi( db=db,", "node.loc.x_low, node.loc.y_low, box_id=graph.maybe_add_connection_box('IMUX'), site_pin_delay=0., ) continue m = PIN_NAME_TO_PARTS.match(pin_name) assert", "a connection box to an IPIN node. \"\"\" node_dict =", "m.group(1)) ) hrow_tile = rebuf_to_hrow_map[rebuf_tile][\"below\"] if hrow_tile is not None:", "= '{}.{}.{}'.format(tile_name, dest_net, src_net) else: pip_name = '{}.{}.{}'.format(tile_name, src_net, dest_net)", "lib import progressbar_utils import datetime import re import functools import", "[LR]IOI_SING tiles IOI_SITE_PIPS = ['OLOGIC', 'ILOGIC', 'IDELAY', 'OCLK_', 'OCLKM_'] IOI_SING_REGEX", "node.id update_connection_box( conn, graph, left_graph_node_pkey, node_idx, connection_box_map ) elif side", "rr_nodes should already be present from the input rr_graph. Create", "(connection_box_wire_pkey, ) ) wire_in_tile_pkey = cur.fetchone()[0] box_id = connection_box_map[wire_in_tile_pkey] cur.execute(", "], }, } } return synth_tiles def create_node_remap(nodes, channels_obj): N", "map of graph_node_pkey to rr inode file' ) parser.add_argument( '--connection_database',", ") parser.add_argument( '--connection_database', help='Database of fabric connectivity', required=True ) parser.add_argument(", "'ILOGIC', 'IDELAY', 'OCLK_', 'OCLKM_'] IOI_SING_REGEX = re.compile( r'([RL]IOI3_SING_X[0-9]+Y)([0-9]+)(\\.IOI_)({})([01])(.*)'.format( \"|\".join(IOI_SITE_PIPS) )", "tracks'.format(now())) segment_id = graph.get_segment_id_from_name('dummy') create_track_rr_graph( conn, graph, node_mapping, use_roi, roi,", "graph, node_mapping, use_roi, roi, synth_tiles, segment_id ) # Set of", "1 wire_in_tile_pkey = results[0][0] return wire_in_tile_pkey return get_site_as_tile_wire def import_graph_nodes(conn,", "wires. 
cur.execute( \"\"\" WITH rebuf_wires(wire_in_tile_pkey) AS ( SELECT pkey FROM", "else '1' unchanged_feature = \"{}{}{}{}\".format( m.group(1), m.group(2), m.group(3), m.group(4) )", "x_max, y_max = map(int, args.graph_limit.split(',')) roi = Roi( db=db, x1=x_min,", "cur = conn.cursor() cur.execute(\"SELECT grid_x FROM phy_tile ORDER BY grid_x", "sqlite3.connect(\"file:{}?mode=ro\".format(args.connection_database), uri=True) as conn: populate_bufg_rebuf_map(conn) cur = conn.cursor() for name,", "FROM graph_edge;\" \"\") for src_graph_node, dest_graph_node in cur.execute(\"\"\" SELECT src_graph_node_pkey,", "is no feature assosciated with that PIP in the prjxray", "JOIN wire ON wire.node_pkey = rebuf_nodes.node_pkey INNER JOIN wire_in_tile ON", "re.compile( 'CLK_BUFG_REBUF_R_CK_GCLK([0-9]+)_(BOT|TOP)' ) cur = conn.cursor() # Find CLK_HROW_TOP_R, CLK_HROW_TOP_R", "track_pkey, graph_node_type, x_low, x_high, y_low, y_high, ptc, capacitance, resistance FROM", "= grid.gridinfo_at_loc((hclk_cmt_x, hclk_cmt_y)) if gridinfo.tile_type == 'HCLK_CMT_L': HCLK_CMT_TILES[tile, 'R'] =", "None cur2.execute( \"\"\" SELECT grid_x, grid_y FROM phy_tile WHERE pkey", "create_connection_boxes(conn, graph): \"\"\" Assign connection box ids for all connection", "pkey = ( SELECT canon_phy_tile_pkey FROM track WHERE pkey =", "= ( SELECT segment_pkey FROM track WHERE pkey = ?", "def add_synthetic_edges(conn, graph, node_mapping, grid, synth_tiles): cur = conn.cursor() delayless_switch", "'FAN', 'BYP']: if pip in src_wire: src_wire = src_wire.replace('_0', '_{}'.format(src_value))", "graph.add_edge( src_node=node_mapping[track_node], sink_node=pin_node[0][0], switch_id=delayless_switch, name='synth_{}_{}'.format(tile_name, pin['wire']), ) elif pin['port_type'] in", ") (src_net, ) = cur.fetchone() cur.execute( \"\"\"SELECT name FROM wire_in_tile", "Returns physical grid dimensions. \"\"\" cur = conn.cursor() cur.execute(\"SELECT grid_x", "name=name, type=graph2.SwitchType[switch_type.upper()], timing=graph2.SwitchTiming( r=drive_resistance, c_in=0.0, c_out=0.0, c_internal=internal_capacitance, t_del=intrinsic_delay, p_cost=penalty_cost, ),", "node_mapping) print('{} Serializing to disk.'.format(now())) capnp_graph.serialize_to_capnp( channels_obj=channels_obj, connection_box_obj=connection_box_obj, num_nodes=len(capnp_graph.graph.nodes), nodes_obj=yield_nodes(capnp_graph.graph.nodes),", "check_feature(pip_name) if feature: yield ( src_node, sink_node, switch_id, (('fasm_features', feature),", "else: switch_id = switch_name_map[switch_pkey] return switch_id def create_get_tile_name(conn): cur =", "site_type_pkey = ? AND name = ? ) AND site_pkey", "IPIN node. \"\"\" node_dict = graph.nodes[node_idx]._asdict() node_dict['connection_box'] = graph2.ConnectionBox( x=grid_x,", "node_mapping[k] = node_remap(node_mapping[k]) print('{} Writing node map.'.format(now())) with open(args.write_rr_node_map, 'wb')", "< x_max: hclk_cmt_x += 1 gridinfo = grid.gridinfo_at_loc((hclk_cmt_x, hclk_cmt_y)) if", "mean the opposite... 
Connection boxes come next. create_connection_boxes assigns connection box ids for all connection box types: every wire_in_tile referenced as a connection_box_wire_pkey by some graph_node is registered through graph.maybe_add_connection_box, after reduce_connection_box merges related wire names into a single box. The merge drops numeric suffixes, maps BRAM_ADDR wires to IMUX and strips a trailing _L, so IMUX0, IMUX1 and IMUX10 all reduce to IMUX, BRAM_ADDR reduces to IMUX, A_L10 reduces to A, and B and B_L both reduce to B.

import_graph_nodes then matches the site-pin IPIN and OPIN rr nodes of the input graph with graph_nodes in the connection database. Synthetic blocks (pin names starting with SYN-) simply get the IMUX connection box with zero site-pin delay. For real tiles the VPR tile prefix is removed from the pin name, the wire_in_tile row is located (via site_as_tile when the tile is a single site promoted to a tile), and the wire's top, bottom, left and right graph_node pkeys are fetched; the pkey on the node's side is stored in node_mapping against the rr node id. update_connection_box then attaches the connection box whenever the graph_node has a connection_box_wire_pkey, looking up the box id, the physical tile's grid_x and grid_y and the site-pin switch's intrinsic delay before set_connection_box rebuilds the node with a graph2.ConnectionBox.
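A minimal sketch of the merging rule described above; the CONNECTION_BOX_FILTER pattern is assumed here, chosen only so that it satisfies the listed examples rather than taken from the importer itself:

import re

# Assumed pattern: keep the non-numeric prefix of the wire name.
CONNECTION_BOX_FILTER = re.compile('([^0-9]+)[0-9]*')


def reduce_connection_box(box):
    """ Reduce the number of connection boxes by merging some.

    Examples:

    >>> reduce_connection_box('IMUX0')
    'IMUX'
    >>> reduce_connection_box('IMUX1')
    'IMUX'
    >>> reduce_connection_box('IMUX10')
    'IMUX'
    >>> reduce_connection_box('BRAM_ADDR')
    'IMUX'
    >>> reduce_connection_box('A_L10')
    'A'
    >>> reduce_connection_box('B')
    'B'
    >>> reduce_connection_box('B_L')
    'B'
    """
    # Drop any trailing numeric index.
    box = CONNECTION_BOX_FILTER.match(box).group(1)

    # All BRAM address wires share the IMUX box.
    if 'BRAM_ADDR' in box:
        box = 'IMUX'

    # Left/right variants collapse onto one box.
    if box.endswith('_L'):
        box = box.replace('_L', '')

    return box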
) ) ) )\"\"\", (graph_node_pkey,", "So here we # map the PIP's feature to \"DIFF_OUT\"", ") );\"\"\", (pin, tile_pkey) ) results = cur.fetchall() assert len(results)", "WHERE pkey = ?;\"\"\", (switch_pkey, ) ) (switch_name, ) =", ")\"\"\", (graph_node_pkey, ) ) site_pin_delay = cur.fetchone()[0] set_connection_box( graph, node_idx,", "cur.fetchall() assert len(results) == 1 wire_in_tile_pkey = results[0][0] tile_pkey, _", "if hrow_tile is not None: REBUF_NODES[node_pkey].append( \"{}.CLK_HROW_R_CK_GCLK{}_ACTIVE\".format( hrow_tile, m.group(1) )", "WHERE site_as_tile.pkey = ?\"\"\", (site_as_tile_pkey, ) ) results = cur.fetchall()", "the IOI_SING within the clock region column is_bottom_sing = int(m.group(2))", "tiles. rebuf_and_hrow_tiles = get_clk_hrow_and_rebuf_tiles_sorted(cur) # Append None on both ends", "[] hclk_cmt_tile = HCLK_CMT_TILES[(hclk_tile, lr)] return ['{}.HCLK_CMT_CK_BUFHCLK{}_USED'.format(hclk_cmt_tile, hclk_number)] def check_feature(feature):", "m.group(2) tile_pkey, site_as_tile_pkey = get_tile_and_site_as_tile_pkey( node.loc.x_low, node.loc.y_low ) if site_as_tile_pkey", "\"\"\" Assign a connection box to an IPIN node. \"\"\"", "rebuf_to_hrow_map[rebuf_tile][\"below\"] if hrow_tile is not None: REBUF_NODES[node_pkey].append( \"{}.CLK_HROW_R_CK_GCLK{}_ACTIVE\".format( hrow_tile, m.group(1)", "print('{} Importing alive tracks'.format(now())) import_tracks(conn, alive_tracks, node_mapping, graph, segment_id) print('original", "synth_tiles is None: synth_tiles = find_constant_network(graph) with sqlite3.connect(\"file:{}?mode=ro\".format(args.connection_database), uri=True) as", "r=resistance, c=capacitance, ), canonical_loc=canonical_loc ) def create_track_rr_graph( conn, graph, node_mapping,", "reduce_connection_box('BRAM_ADDR') 'IMUX' >>> reduce_connection_box('A_L10') 'A' >>> reduce_connection_box('B') 'B' >>> reduce_connection_box('B_L')", "synthetic tiles, search the input rr graph for the SYN-GND", "Rough structure: Add rr_nodes for CHANX and CHANY from the", "REBUF tiles. returns them in a list sorted according to", "should already be present from the input rr_graph. 
Create a", "edges.'.format(now())) cur = conn.cursor() cur.execute(\"SELECT count() FROM graph_edge;\" \"\") for", ") ) wire_in_tile_pkey = cur.fetchone()[0] box_id = connection_box_map[wire_in_tile_pkey] cur.execute( \"\"\"", "Counting edges.'.format(now())) cur = conn.cursor() cur.execute(\"SELECT count() FROM graph_edge;\" \"\")", "if pin_name.startswith('SYN-'): set_connection_box( graph, node_idx, node.loc.x_low, node.loc.y_low, box_id=graph.maybe_add_connection_box('IMUX'), site_pin_delay=0., )", "cur.execute( \"\"\" SELECT pkey FROM wire_in_tile WHERE name = ?", "= conn.cursor() cur2 = conn.cursor() for (graph_node_pkey, track_pkey, graph_node_type, x_low,", "pin['wire'], wire_pkey ) elif pin['port_type'] == 'VCC': cur.execute('SELECT vcc_track_pkey FROM", "wire ) pin_node = graph.get_nodes_for_pin( tuple(synth_tile['loc']), pin_name ) if pin['port_type']", "argparse.ArgumentParser() parser.add_argument( '--db_root', required=True, help='Project X-Ray Database' ) parser.add_argument('--part', required=True,", "VPR schema files', ) print('{} Starting routing import'.format(now())) args =", "idx, node in enumerate(nodes): yield node if idx % 1024", "if tile is not None and tile.startswith(\"CLK_HROW\"): return tile return", "= node.id update_connection_box( conn, graph, right_graph_node_pkey, node_idx, connection_box_map ) elif", "graph2.NodeType.OPIN): continue gridloc = graph.loc_map[(node.loc.x_low, node.loc.y_low)] pin_name = graph.pin_ptc_to_name_map[ (gridloc.block_type_id,", "graph.pin_ptc_to_name_map[ (gridloc.block_type_id, node.loc.ptc)] # Synthetic blocks are handled below. if", "wires that connect IOB33M and IOB33S sites. # They are", "( SELECT DISTINCT wire_in_tile_pkey FROM wire WHERE pkey IN (", "drive_resistance, intrinsic_delay, penalty_cost, \\ switch_type in cur.execute(\"\"\" SELECT name, internal_capacitance,", "output mode. # There is no feature assosciated with that", "'pins': [ { 'wire': 'GND', 'pad': 'GND', 'port_type': 'GND', 'is_clock':", "Imports 7-series routing fabric to the rr graph. For ROI", "# # The following, is to change the edge feature", "'GND', 'pad': 'GND', 'port_type': 'GND', 'is_clock': False, }, ], },", "them in a list sorted according to their Y coordinates.", "bar.update(idx) def create_channels(conn): cur = conn.cursor() cur.execute( \"\"\" SELECT chan_width_max,", "\"DIFF_OUT\". # # The \"DIFF_OUT\" cannot be set in the", "WHERE site_type_pkey = ? AND name = ? ) AND", "tile_type = synth_tile['tile_name'] wire = 'inpad' elif pin['port_type'] == 'VCC':", "create_node_remap(nodes, channels_obj): N = 2 p = math.ceil(math.log2(max(channels_obj.x_max, channels_obj.y_max))) point_map", "( SELECT segment_pkey FROM track WHERE pkey = ? )\"\"\",", "print('{} Done writing node map.'.format(now())) if __name__ == '__main__': main()", "'pad': 'VCC', 'port_type': 'VCC', 'is_clock': False, }, ], }, \"GND\":", "{ 'tiles': { \"VCC\": { 'loc': vcc_loc, 'pins': [ {", "graph.get_segment_id_from_name(segment_name) else: segment_id = default_segment_id node_type = graph2.NodeType(graph_node_type) if node_type", "cur.execute( \"\"\" SELECT name FROM phy_tile WHERE pkey = ?;", "{} grid = db.grid() _, x_max, _, _ = grid.dims()", "fasm feature needs to be added. # The relevant features", "NOT NULL ) );\"\"\" ) connection_box_map = {} for wire_in_tile_pkey,", "cur: y_list.append(graph2.ChannelList(idx, info)) return graph2.Channels( chan_width_max=chan_width_max, x_min=x_min, y_min=y_min, x_max=x_max, y_max=y_max,", "# is a tile-wide feature named \"DIFF_OUT\". 
# # The", "bottom_graph_node_pkey, left_graph_node_pkey, right_graph_node_pkey FROM wire WHERE wire_in_tile_pkey = ? AND", "backward: pip_name = '{}.{}.{}'.format(tile_name, dest_net, src_net) else: pip_name = '{}.{}.{}'.format(tile_name,", "db but there # is a tile-wide feature named \"DIFF_OUT\".", "segment_id) print('original {} final {}'.format(num_channels, len(alive_tracks))) def add_synthetic_edges(conn, graph, node_mapping,", "node WHERE pkey = ( SELECT node_pkey FROM wire WHERE", "enable_bufhce_zinv_ce)) if BUFG_CLK_IN_REGEX.fullmatch(feature_path[-1]): enable_feature = '{}.{}_ACTIVE'.format( feature_path[0], feature_path[-1] ) return", "REBUF_SOURCES REBUF_SOURCES = {} rebuf_wire_regexp = re.compile( 'CLK_BUFG_REBUF_R_CK_GCLK([0-9]+)_(BOT|TOP)' ) cur", "( SELECT phy_tile_pkey FROM tile_map WHERE tile_pkey = ? )", "and feature_path[1] == \"IOB_DIFFI_IN1\": return '' if feature_path[2] == \"IOB_PADOUT1\"", "( SELECT pkey FROM site_pin WHERE site_type_pkey = ? AND", "conn.cursor() cur.execute(\"SELECT grid_x FROM phy_tile ORDER BY grid_x DESC LIMIT", ") else: use_roi = False roi = None synth_tiles =", "assert top_graph_node_pkey is not None, (tile_type, pin_name) node_mapping[top_graph_node_pkey] = node.id", "graph = capnp_graph.graph if synth_tiles is None: synth_tiles = find_constant_network(graph)", "yield ( src_node, sink_node, switch_id, (('fasm_features', feature), ) ) else:", "tile_pkey, site_as_tile_pkey = get_tile_and_site_as_tile_pkey( node.loc.x_low, node.loc.y_low ) if site_as_tile_pkey is", "capnp_graph2.Graph( rr_graph_schema_fname=os.path.join( args.vpr_capnp_schema_dir, 'rr_graph_uxsdcxx.capnp' ), input_file_name=args.read_rr_graph, progressbar=progressbar_utils.progressbar, output_file_name=args.write_rr_graph, ) graph", "= ( SELECT site_pin_switch_pkey FROM wire_in_tile WHERE pkey = (", "for idx, (src_graph_node, dest_graph_node, switch_pkey, phy_tile_pkey, pip_pkey, backward) in enumerate(cur.execute(\"\"\"", "[] point_map[(x, y)].append(node.id) hilbert_curve = HilbertCurve(p, N) idx = 0", "= [feature] features.append( '{}.{}_ACTIVE'.format(feature_path[0], feature_path[-1]) ) features.append('{}.{}_USED'.format(feature_path[0], feature_path[-1])) return '", "chan_width_max=chan_width_max, x_min=x_min, y_min=y_min, x_max=x_max, y_max=y_max, x_list=x_list, y_list=y_list, ) def create_connection_boxes(conn,", "add_synthetic_edges(conn, graph, node_mapping, grid, synth_tiles) print('{} Creating channels.'.format(now())) channels_obj =", "return cur.fetchone()[0] return get_tile_name def create_get_pip_wire_names(conn): cur = conn.cursor() @functools.lru_cache(maxsize=None)", "dest_wire_in_tile_pkey FROM pip_in_tile WHERE pkey = ?;\"\"\", (pip_pkey, ) )", "get_site_as_tile_wire(site_as_tile_pkey, pin) else: cur.execute( \"\"\" SELECT pkey FROM wire_in_tile WHERE", "return synth_tiles def create_node_remap(nodes, channels_obj): N = 2 p =", "wire_in_tile ON wire_in_tile.pkey = wire.wire_in_tile_pkey WHERE wire.node_pkey = ?;\"\"\", (node_pkey,", "one # level up in the hierarchy (its tile-wide, not", "tile_name is not None and tile_name.startswith(\"CLK_BUFG_REBUF\"): rebuf_to_hrow_map[tile_name] = { \"above\":", "to the source pip name that # changes based on", "the database. IPIN and OPIN rr_nodes should already be present", "Y coordinates. 
\"\"\" cur.execute( \"\"\" SELECT name FROM phy_tile WHERE", "WHERE site_pin_pkey = ( SELECT pkey FROM site_pin WHERE site_type_pkey", "while hclk_cmt_x > 0: hclk_cmt_x -= 1 gridinfo = grid.gridinfo_at_loc((hclk_cmt_x,", "= {} def populate_hclk_cmt_tiles(db): global HCLK_CMT_TILES HCLK_CMT_TILES = {} grid", "y_max FROM channel;\"\"\" ) chan_width_max, x_min, x_max, y_min, y_max =", "set() for (track_pkey, ) in cur.execute(\"SELECT pkey FROM track WHERE", ") result = cur.fetchone() assert result is not None, (wire_in_tile_pkey,", "find_constant_network(graph) with sqlite3.connect(\"file:{}?mode=ro\".format(args.connection_database), uri=True) as conn: populate_bufg_rebuf_map(conn) cur = conn.cursor()", "fixed. try: graph.get_switch_id(name) continue except KeyError: capnp_graph.add_switch( graph2.Switch( id=None, name=name,", "grid.gridinfo_at_loc((hclk_cmt_x, hclk_cmt_y)) if gridinfo.tile_type == 'HCLK_CMT': HCLK_CMT_TILES[tile, 'L'] = grid.tilename_at_loc(", "in switch_name_map: cur = conn.cursor() cur.execute( \"\"\"SELECT name FROM switch", "= remove_vpr_tile_prefix(tile_type) pin = m.group(2) tile_pkey, site_as_tile_pkey = get_tile_and_site_as_tile_pkey( node.loc.x_low,", "assert False, side def import_tracks(conn, alive_tracks, node_mapping, graph, default_segment_id): cur", "SELECT node_pkey FROM graph_node WHERE pkey = ? ) )", "not in (graph2.NodeType.IPIN, graph2.NodeType.OPIN): continue gridloc = graph.loc_map[(node.loc.x_low, node.loc.y_low)] pin_name", "= cur.fetchone()[0] box_id = connection_box_map[wire_in_tile_pkey] cur.execute( \"\"\" SELECT switch.intrinsic_delay FROM", ") connection_box_wire_pkey = cur.fetchone()[0] if connection_box_wire_pkey is not None: cur.execute(", "SYN-VCC tiles. \"\"\" block_types = {} for block_type in graph.block_types:", "dest_net, src_net) else: pip_name = '{}.{}.{}'.format(tile_name, src_net, dest_net) else: pip_name", "(graph_node_pkey, track_pkey, graph_node_type, x_low, x_high, y_low, y_high, ptc, capacitance, resistance)", "return ' '.join((feature, enable_buffer_feature)) # BUFHCE sites are now routed", "side def import_tracks(conn, alive_tracks, node_mapping, graph, default_segment_id): cur = conn.cursor()", "bits with the top half of a normal IOI TILE.", "max(x_low, 1) elif node_type == graph2.NodeType.CHANY: direction = 'Y' y_low", "HCLK_CMT_TILES: return [] hclk_cmt_tile = HCLK_CMT_TILES[(hclk_tile, lr)] return ['{}.HCLK_CMT_CK_BUFHCLK{}_USED'.format(hclk_cmt_tile, hclk_number)]", "assert len(results) == 1 wire_in_tile_pkey = results[0][0] tile_pkey, _ =", "conn.cursor() cur.execute(\"SELECT count() FROM graph_edge;\" \"\") (num_edges, ) = cur.fetchone()", "= conn.cursor() cur.execute( \"\"\" SELECT pkey, tile_type_pkey, name FROM wire_in_tile", ") ) )\"\"\", (graph_node_pkey, ) ) site_pin_delay = cur.fetchone()[0] set_connection_box(", "wire_in_tile_pkey FROM wire WHERE pkey IN ( SELECT connection_box_wire_pkey FROM", "grid, synth_tiles) print('{} Creating channels.'.format(now())) channels_obj = create_channels(conn) node_remap =", "BY grid_y DESC; \"\"\" ) return [t[0] for t in", "None, (tile_type, pin_name) node_mapping[bottom_graph_node_pkey] = node.id update_connection_box( conn, graph, bottom_graph_node_pkey,", ") else: assert False, side def import_tracks(conn, alive_tracks, node_mapping, graph,", "1 del point_map[coord] return lambda x: id_map[x] def main(): parser", "'' if feature_path[2] == \"IOB_PADOUT1\" and feature_path[1] == \"IOB_DIFFI_IN0\": return", "tile its above and below CLK_HROW tile. 
Note that in", "NULL;\"\"\")): if track_pkey not in alive_tracks: continue cur2.execute( \"\"\" SELECT", "= get_clk_hrow_and_rebuf_tiles_sorted(cur) # Append None on both ends of the", "node_mapping, graph, segment_id) print('original {} final {}'.format(num_channels, len(alive_tracks))) def add_synthetic_edges(conn,", "SELECT rebuf_nodes.node_pkey, phy_tile.name, wire_in_tile.name FROM rebuf_nodes INNER JOIN wire ON", "gnd_track_pkey FROM constant_sources') (track_pkey, ) = cur.fetchone() else: assert False,", "files', ) print('{} Starting routing import'.format(now())) args = parser.parse_args() db", "= '{}.IOI_OCLKM_{}.{}'.format( feature_path[0], m.group(1), feature_path[-1] ) return ' '.join((feature, enable_oclkm_feature))", "to get the relative # position of the IOI_SING within", "graph, switch_name_map, switch_pkey): assert switch_pkey is not None if switch_pkey", "= (grid_loc.x, grid_loc.y) assert gnd_loc is not None assert vcc_loc", "= conn.cursor() @functools.lru_cache(maxsize=None) def get_tile_name(tile_pkey): cur.execute( \"\"\" SELECT name FROM", "top or bottom of the whole # IOI/IOB column. The", "configurations, this also connects the synthetic IO tiles to the", "node.type not in (graph2.NodeType.IPIN, graph2.NodeType.OPIN): continue gridloc = graph.loc_map[(node.loc.x_low, node.loc.y_low)]", "WHERE pkey IN ( SELECT connection_box_wire_pkey FROM graph_node WHERE connection_box_wire_pkey", "graph.create_pin_name_from_tile_type_and_pin( tile_type, wire ) pin_node = graph.get_nodes_for_pin( tuple(synth_tile['loc']), pin_name )", ") return [t[0] for t in cur.fetchall()] def populate_bufg_rebuf_map(conn): global", "REBUF_NODES: REBUF_NODES[node_pkey] = [] m = rebuf_wire_regexp.fullmatch(rebuf_wire_name) if m.group(2) ==", "and num_outpad > 1: pin_name = graph.create_pin_name_from_tile_type_sub_tile_num_and_pin( tile_type, (pin['z_loc'] -", "if site_as_tile_pkey is not None: wire_in_tile_pkey = get_site_as_tile_wire(site_as_tile_pkey, pin) else:", "tile_type_pkey, name FROM wire_in_tile WHERE pkey IN ( SELECT DISTINCT", "in progressbar_utils.progressbar(cur.execute(\"\"\" SELECT pkey, track_pkey, graph_node_type, x_low, x_high, y_low, y_high,", "= list( tracks_model.get_tracks_for_wire_at_coord( tuple(synth_tile['loc']) ).values() ) assert len(option) > 0,", "required=True, help='Output rr_graph file' ) parser.add_argument( '--write_rr_node_map', required=True, help='Output map", ") elif side == tracks.Direction.RIGHT: assert right_graph_node_pkey is not None,", "pip_in_tile_pkey, backward FROM graph_edge; \"\"\")): if src_graph_node not in node_mapping:", "== grid_loc.block_type_id: assert vcc_loc is None vcc_loc = (grid_loc.x, grid_loc.y)", "switch_id = switch_name_map[switch_pkey] return switch_id def create_get_tile_name(conn): cur = conn.cursor()", "REBUF_SOURCES = {} def get_clk_hrow_and_rebuf_tiles_sorted(cur): \"\"\" Finds all CLK_HROW_TOP_R, CLK_HROW_BOT_T", "x_list.append(graph2.ChannelList(idx, info)) cur.execute('SELECT idx, info FROM y_list;') y_list = []", "if switch_pkey not in switch_name_map: cur = conn.cursor() cur.execute( \"\"\"SELECT", "bar: for idx, (src_graph_node, dest_graph_node, switch_pkey, phy_tile_pkey, pip_pkey, backward) in", "'.join((feature, enable_cascout)) parts = feature.split('.') wire_feature = feature_when_routed(parts[1]) if wire_feature", "1;\") x_max = cur.fetchone()[0] cur.execute(\"SELECT grid_y FROM phy_tile ORDER BY", "this # need, as the IOI_SING tiles have the same", "FROM graph_node WHERE pkey = ?\"\"\", (graph_node_pkey, ) ) 
connection_box_wire_pkey", "in cur: connection_box_map[wire_in_tile_pkey] = graph.maybe_add_connection_box( reduce_connection_box(wire_name) ) return connection_box_map def", "\"\"\" # IOI_SING tiles have bits in common with the", "to an IPIN node. \"\"\" node_dict = graph.nodes[node_idx]._asdict() node_dict['connection_box'] =", "create_get_tile_name(conn) get_pip_wire_names = create_get_pip_wire_names(conn) switch_name_map = {} print('{} Importing edges", "node_pkey FROM wire WHERE pkey = ? );\"\"\", (wire_pkey, )", "FROM channel;\"\"\" ) chan_width_max, x_min, x_max, y_min, y_max = cur.fetchone()", "in the destination wire of the pip # # Example:", "assert False, \"Impossible to determine X location of BUFHCE\" y_loc", "parts = feature.split('.') wire_feature = feature_when_routed(parts[1]) if wire_feature is not", "SELECT DISTINCT wire_in_tile_pkey FROM wire WHERE pkey IN ( SELECT", "1;\") y_max = cur.fetchone()[0] return x_max + 1, y_max +", "point_map[(x, y)].append(node.id) hilbert_curve = HilbertCurve(p, N) idx = 0 id_map", "cur.fetchone() return (src_net, dest_net) return get_pip_wire_names def get_number_graph_edges(conn, graph, node_mapping):", "main(): parser = argparse.ArgumentParser() parser.add_argument( '--db_root', required=True, help='Project X-Ray Database'", "IOI_OCLK = re.compile('IOI_OCLK_([01])') # Regex for [LR]IOI_SING tiles IOI_SITE_PIPS =", "'--db_root', required=True, help='Project X-Ray Database' ) parser.add_argument('--part', required=True, help='FPGA part')", "re.compile( r'([RL]IOI3_SING_X[0-9]+Y)([0-9]+)(\\.IOI_)({})([01])(.*)'.format( \"|\".join(IOI_SITE_PIPS) ) ) def reduce_connection_box(box): \"\"\" Reduce the", "pkey = ? )\"\"\", (track_pkey, ) ) result = cur2.fetchone()", "cur.execute('SELECT vcc_track_pkey FROM constant_sources') (track_pkey, ) = cur.fetchone() elif pin['port_type']", "source pip name that # changes based on which IOI_SING", "'GND': tile_type = 'SYN-GND' wire = 'GND' else: assert False,", "= get_switch_name( conn, graph, switch_name_map, switch_pkey ) src_node = node_mapping[src_graph_node]", "args.graph_limit: use_roi = True x_min, y_min, x_max, y_max = map(int,", "conn, graph, switch_name_map, switch_pkey ) src_node = node_mapping[src_graph_node] sink_node =", "hrow_tile = rebuf_to_hrow_map[rebuf_tile][\"below\"] if hrow_tile is not None: REBUF_NODES[node_pkey].append( \"{}.CLK_HROW_R_CK_GCLK{}_ACTIVE\".format(", "track WHERE pkey = ? )\"\"\", (track_pkey, ) ) result", "m: # Each clock region spans a total of 50", "using an ROI, synthetic tile defintion from prjxray-arch-import' ) parser.add_argument(", "node_mapping, use_roi, roi, synth_tiles, segment_id ) # Set of (src,", "tracks.Direction.LEFT: assert left_graph_node_pkey is not None, (tile_type, pin_name) node_mapping[left_graph_node_pkey] =", "differential input mode. 
# # Vivado does not report this", "= map(int, args.graph_limit.split(',')) roi = Roi( db=db, x1=x_min, y1=y_min, x2=x_max,", "to attach to the IOI_SITE_PIPS names # in the destination", "WHERE pkey = ( SELECT node_pkey FROM graph_node WHERE pkey", "= capnp_graph.graph if synth_tiles is None: synth_tiles = find_constant_network(graph) with", "box: box = 'IMUX' if box.endswith('_L'): box = box.replace('_L', '')", "cur = conn.cursor() get_tile_and_site_as_tile_pkey = create_get_tile_and_site_as_tile_pkey(cur) get_site_as_tile_wire = create_get_site_as_tile_wire(cur) for", "} } return synth_tiles def create_node_remap(nodes, channels_obj): N = 2", ") m = CASCOUT_REGEX.fullmatch(feature_path[-2]) if m: enable_cascout = '{}.CASCOUT_{}_ACTIVE'.format( feature_path[0],", "merging some. Examples: >>> reduce_connection_box('IMUX0') 'IMUX' >>> reduce_connection_box('IMUX1') 'IMUX' >>>", "= cur.fetchone() assert result is not None, (wire_in_tile_pkey, tile_pkey) (", "= wire.phy_tile_pkey WHERE wire.wire_in_tile_pkey IN (SELECT wire_in_tile_pkey FROM rebuf_wires) ORDER", "site_as_tile.pkey = ?\"\"\", (site_as_tile_pkey, ) ) results = cur.fetchall() assert", "r=drive_resistance, c_in=0.0, c_out=0.0, c_internal=internal_capacitance, t_del=intrinsic_delay, p_cost=penalty_cost, ), sizing=graph2.SwitchSizing( mux_trans_size=0, buf_size=0,", "input rr_graph. Create a mapping between database graph_nodes and IPIN,", "phy_tile_type_pkey IN ( SELECT tile_type_pkey FROM phy_tile WHERE pkey IN", "segment_id ): cur = conn.cursor() cur.execute(\"\"\"SELECT count(*) FROM track;\"\"\") (num_channels,", "max(y_low, 1) else: assert False, node_type canonical_loc = None cur2.execute(", "tile_type = 'SYN-GND' wire = 'GND' else: assert False, pin", "+ 1 def find_constant_network(graph): \"\"\" Find VCC and GND tiles", "node_remap(node_mapping[k]) print('{} Writing node map.'.format(now())) with open(args.write_rr_node_map, 'wb') as f:", "wire WHERE pkey = ( SELECT site_wire_pkey FROM node WHERE", "IN_USE: to enable the BUFHCE site # - ZINV_CE: to", "map.'.format(now())) with open(args.write_rr_node_map, 'wb') as f: pickle.dump(node_mapping, f) print('{} Done", "REBUF_NODES[node_pkey].append( \"{}.CLK_HROW_R_CK_GCLK{}_ACTIVE\".format( hrow_tile, m.group(1) ) ) else: assert False, (rebuf_tile,", "not None: REBUF_NODES[node_pkey].append( \"{}.CLK_HROW_R_CK_GCLK{}_ACTIVE\".format( hrow_tile, m.group(1) ) ) elif m.group(2)", "\"\"\" SELECT chan_width_max, x_min, x_max, y_min, y_max FROM channel;\"\"\" )", "REBUF_SOURCES[(tile, wire_name)] = node_pkey HCLK_CMT_TILES = {} def populate_hclk_cmt_tiles(db): global", "len(alive_tracks))) def add_synthetic_edges(conn, graph, node_mapping, grid, synth_tiles): cur = conn.cursor()", "maybe_get_clk_hrow(i): \"\"\" Returns a name of CLK_HROW tile only if", "site-wide). 
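A condensed sketch of two of these cases, the IOB differential pips and the BUFHCE route-through; the CLK_HROW_CK_MUX_REGEX pattern is an assumption, and the function expects the three-part tile.dest_wire.src_wire feature string described above:

import re

# Assumed pattern for the CLK_HROW clock mux wires feeding BUFHCE sites.
CLK_HROW_CK_MUX_REGEX = re.compile('CLK_HROW_CK_MUX_OUT_([LR])([0-9]+)')


def check_feature_sketch(feature):
    """ Map a pip feature string to the fasm features it implies (excerpt). """
    feature_path = feature.split('.')

    # Differential output mode: no per-pip feature exists, only the tile-wide
    # DIFF_OUT feature, so emit <tile>.OUT_DIFF instead.
    if feature_path[2] == "IOB_DIFFO_OUT0" and feature_path[1] == "IOB_DIFFO_IN1":
        return '{}.OUT_DIFF'.format(feature_path[0])

    # Hard wires between IOB33M and IOB33S used for differential input:
    # not a real pip in Vivado, so no feature is emitted.
    if feature_path[2] == "IOB_PADOUT0" and feature_path[1] == "IOB_DIFFI_IN1":
        return ''
    if feature_path[2] == "IOB_PADOUT1" and feature_path[1] == "IOB_DIFFI_IN0":
        return ''

    # Routing through a BUFHCE site: also enable the site and tie CE to 1.
    m = CLK_HROW_CK_MUX_REGEX.fullmatch(feature_path[-1])
    if m:
        x_loc = 0 if 'L' in m.group(1) else 1
        bufhce_loc = 'BUFHCE_X{}Y{}'.format(x_loc, m.group(2))
        return ' '.join((
            feature,
            '{}.BUFHCE.{}.IN_USE'.format(feature_path[0], bufhce_loc),
            "{}.BUFHCE.{}.ZINV_CE=1'b1".format(feature_path[0], bufhce_loc),
        ))

    return feature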
So here we # map the PIP's feature to", "and tile.startswith(\"CLK_HROW\"): return tile return None # Assign each REBUF", "in REBUF_NODES: REBUF_NODES[node_pkey] = [] m = rebuf_wire_regexp.fullmatch(rebuf_wire_name) if m.group(2)", "(track_pkey, ) in cur.execute(\"SELECT pkey FROM track WHERE alive =", "y_loc = m.group(2) bufhce_loc = 'BUFHCE_X{}Y{}'.format(x_loc, y_loc) enable_bufhce_in_use = '{}.BUFHCE.{}.IN_USE'.format(", ") ) for pin in synth_tile['pins']: if pin['port_type'] in ['input',", "FROM rebuf_nodes INNER JOIN wire ON wire.node_pkey = rebuf_nodes.node_pkey INNER", "FROM wire_in_tile WHERE name LIKE \"CLK_BUFG_REBUF_R_CK_GCLK%_BOT\" OR name LIKE \"CLK_BUFG_REBUF_R_CK_GCLK%_TOP\"", "1024 == 0: bar.update(idx) def create_channels(conn): cur = conn.cursor() cur.execute(", ") elif m.group(2) == 'BOT': REBUF_NODES[node_pkey].append( '{}.GCLK{}_ENABLE_ABOVE'.format(rebuf_tile, m.group(1)) ) hrow_tile", "Assign connection box ids for all connection box types. \"\"\"", "connection database and serialize output to rr_graph XML. \"\"\" import", "REBUF_NODES REBUF_NODES = {} global REBUF_SOURCES REBUF_SOURCES = {} rebuf_wire_regexp", "\\ feature_path[1] == \"IOB_DIFFO_IN1\": return '{}.OUT_DIFF'.format(feature_path[0]) # IOB_PADOUT0->IOB_DIFFI_IN1 # IOB_PADOUT1->IOB_DIFFI_IN0", "update_connection_box( conn, graph, top_graph_node_pkey, node_idx, connection_box_map ) elif side ==", "= None cur2.execute( \"\"\" SELECT grid_x, grid_y FROM phy_tile WHERE", "], }, \"GND\": { 'loc': gnd_loc, 'pins': [ { 'wire':", "in cur: y_list.append(graph2.ChannelList(idx, info)) return graph2.Channels( chan_width_max=chan_width_max, x_min=x_min, y_min=y_min, x_max=x_max,", "for the SYN-GND and SYN-VCC tiles. \"\"\" block_types = {}", "pin_name = graph.create_pin_name_from_tile_type_sub_tile_num_and_pin( tile_type, pin['z_loc'], wire ) elif wire ==", "hrow_tile = rebuf_to_hrow_map[rebuf_tile][\"above\"] if hrow_tile is not None: REBUF_NODES[node_pkey].append( \"{}.CLK_HROW_R_CK_GCLK{}_ACTIVE\".format(", "get_tile_name def create_get_pip_wire_names(conn): cur = conn.cursor() @functools.lru_cache(maxsize=None) def get_pip_wire_names(pip_pkey): cur.execute(", "simplify the code below. rebuf_and_hrow_tiles = [None] + rebuf_and_hrow_tiles +", "(graph_node_pkey, ) ) connection_box_wire_pkey = cur.fetchone()[0] if connection_box_wire_pkey is not", "Add rr_edge for each row in the graph_edge table. Import", "FROM phy_tile WHERE pkey = ?; \"\"\", (tile_pkey, ) )", "graph. For ROI configurations, this also connects the synthetic IO", "maybe_get_clk_hrow(i + 1), } # Find nodes touching rebuf wires.", "# level up in the hierarchy (its tile-wide, not site-wide).", ";\"\"\", (site_type_pkey, pin, site_pkey) ) results = cur.fetchall() assert len(results)", "\"\"\" SELECT site.site_type_pkey, site_as_tile.site_pkey FROM site_as_tile INNER JOIN site ON", "graph.add_edge( src_node=pin_node[0][0], sink_node=node_mapping[track_node], switch_id=delayless_switch, name='synth_{}_{}'.format(tile_name, pin['wire']), ) else: assert False,", "DISTINCT node_pkey FROM wire WHERE wire_in_tile_pkey IN (SELECT wire_in_tile_pkey FROM", "FROM wire_in_tile WHERE pkey = ?;\"\"\", (dest_wire_in_tile_pkey, ) ) (dest_net,", "can be removed once # https://github.com/verilog-to-routing/vtr-verilog-to-routing/issues/354 # is fixed. 
try:", "num_outpad > 1: pin_name = graph.create_pin_name_from_tile_type_sub_tile_num_and_pin( tile_type, (pin['z_loc'] - num_inpad),", "(feature_path[0], feature_path[1]) if rebuf_key in REBUF_SOURCES: return ' '.join([feature] +", "that connect IOB33M and IOB33S sites. # They are used", "def phy_grid_dims(conn): \"\"\" Returns physical grid dimensions. \"\"\" cur =", "in cur.execute(\"\"\" SELECT pkey, site_as_tile_pkey, grid_x, grid_y FROM tile;\"\"\"): tiles[(grid_x,", "to simplify the code below. rebuf_and_hrow_tiles = [None] + rebuf_and_hrow_tiles", "is not None, (tile_type, pin_name) node_mapping[top_graph_node_pkey] = node.id update_connection_box( conn,", ") ) ) num_outpad = len( list( filter( lambda t:", "node_idx, connection_box_map ) elif side == tracks.Direction.TOP: assert top_graph_node_pkey is", "wire.phy_tile_pkey INNER JOIN wire_in_tile ON wire_in_tile.pkey = wire.wire_in_tile_pkey WHERE wire.node_pkey", "rebuf_nodes.node_pkey;\"\"\" ) for node_pkey, rebuf_tile, rebuf_wire_name in cur: if node_pkey", "lib.rr_graph import tracks from lib.connection_database import get_wire_pkey, get_track_model import lib.rr_graph_capnp.graph2", "== \"IOB_DIFFO_OUT0\" and \\ feature_path[1] == \"IOB_DIFFO_IN1\": return '{}.OUT_DIFF'.format(feature_path[0]) #", "\"IOB_DIFFI_IN1\": return '' if feature_path[2] == \"IOB_PADOUT1\" and feature_path[1] ==", "' '.join((feature, enable_feature)) if BUFG_CLK_OUT_REGEX.fullmatch(feature_path[-1]): enable_feature = '{}.{}_ACTIVE'.format( feature_path[0], feature_path[-1]", "track_pkey) if wire == 'inpad' and num_inpad > 1: pin_name", "SELECT site_wire_pkey FROM node WHERE pkey = ( SELECT node_pkey", "node_mapping[src_graph_node] sink_node = node_mapping[dest_graph_node] if pip_name is not None: feature", "= 0 id_map = {} for h in range(hilbert_curve.max_h +", "were unused in arch xml, and so # were not", "prjxray_tile_import import remove_vpr_tile_prefix import simplejson as json from lib import", "open(args.write_rr_node_map, 'wb') as f: pickle.dump(node_mapping, f) print('{} Done writing node", "reduce_connection_box('B') 'B' >>> reduce_connection_box('B_L') 'B' \"\"\" box = CONNECTION_BOX_FILTER.match(box).group(1) if", "operates in the differential output mode. # There is no", "physical grid dimensions. \"\"\" cur = conn.cursor() cur.execute(\"SELECT grid_x FROM", "= ?;\"\"\", (wire_in_tile_pkey, tile_pkey) ) result = cur.fetchone() assert result", "else: assert False, side def import_tracks(conn, alive_tracks, node_mapping, graph, default_segment_id):", "= cur.fetchall() assert len(results) == 1, site_as_tile_pkey return results[0] @functools.lru_cache(maxsize=0)", "in graph.edges: yield (edge.src_node, edge.sink_node, edge.switch_id, None) # Then yield", "XML. # # TODO: This can be removed once #", "Import channel XML node from connection database and serialize output", "{ 'loc': vcc_loc, 'pins': [ { 'wire': 'VCC', 'pad': 'VCC',", "gridloc[1]) cur.execute( \"\"\" SELECT top_graph_node_pkey, bottom_graph_node_pkey, left_graph_node_pkey, right_graph_node_pkey FROM wire", "'CLK_HROW_TOP_R']: continue hclk_x, hclk_y = grid.loc_of_tilename(tile) hclk_cmt_x = hclk_x hclk_cmt_y", "not None, (tile_type, pin_name) node_mapping[top_graph_node_pkey] = node.id update_connection_box( conn, graph,", "not report this connection as a PIP but in the", "box_id = connection_box_map[wire_in_tile_pkey] cur.execute( \"\"\" SELECT switch.intrinsic_delay FROM switch WHERE", "name = ? ) AND site_pkey = ? 
;\"\"\", (site_type_pkey,", "1: pin_name = graph.create_pin_name_from_tile_type_sub_tile_num_and_pin( tile_type, (pin['z_loc'] - num_inpad), wire )", "conn.cursor() cur.execute( \"\"\"SELECT name FROM switch WHERE pkey = ?;\"\"\",", "TODO: This can be removed once # https://github.com/verilog-to-routing/vtr-verilog-to-routing/issues/354 # is", "in the connection_database. print('{} Importing graph nodes'.format(now())) import_graph_nodes(conn, graph, node_mapping,", "= m.group(1) tile_type = remove_vpr_tile_prefix(tile_type) pin = m.group(2) tile_pkey, site_as_tile_pkey", "block_types['SYN-GND'] vcc_block_id = block_types['SYN-VCC'] gnd_loc = None vcc_loc = None", "pkey = ( SELECT site_pin_switch_pkey FROM wire_in_tile WHERE pkey =", "to \"DIFF_OUT\" if feature_path[2] == \"IOB_DIFFO_OUT0\" and \\ feature_path[1] ==", "if src_graph_node not in node_mapping: continue if dest_graph_node not in", "elif side == tracks.Direction.BOTTOM: assert bottom_graph_node_pkey is not None, (tile_type,", "re.compile('CLK_HROW_R_CK_GCLK[0-9]+') CCIO_ACTIVE_REGEX = re.compile('HCLK_CMT_CCIO[0-9]+') HCLK_OUT = re.compile('CLK_HROW_CK_HCLK_OUT_([LR])([0-9]+)') IOI_OCLK = re.compile('IOI_OCLK_([01])')", "edges.'.format(now())) for edge in graph.edges: yield (edge.src_node, edge.sink_node, edge.switch_id, None)", ") def create_track_rr_graph( conn, graph, node_mapping, use_roi, roi, synth_tiles, segment_id", "( SELECT phy_tile_pkey FROM wire WHERE pkey = ? )\"\"\",", "the whole # IOI/IOB column. The Y coordinate identified with", "lambda t: t['port_type'] == 'input', synth_tile['pins'] ) ) ) for", "enumerate(cur.execute(\"\"\" SELECT src_graph_node_pkey, dest_graph_node_pkey, switch_pkey, phy_tile_pkey, pip_in_tile_pkey, backward FROM graph_edge;", "coordinate identified with the # second capture group is dived", "destination wire of the pip # # Example: IOI_OLOGIC0 ->", "return ' '.join((feature, enable_feature)) if BUFG_CLK_OUT_REGEX.fullmatch(feature_path[-1]): enable_feature = '{}.{}_ACTIVE'.format( feature_path[0],", "DESC; \"\"\" ) return [t[0] for t in cur.fetchall()] def", "and feature_path[1] == \"IOB_DIFFI_IN0\": return '' # REBUF stuff rebuf_key", "+ find_hclk_cmt_hclk_feature( feature_path[0], m.group(1), m.group(2) ) ) m = CASCOUT_REGEX.fullmatch(feature_path[-2])", "(wire_in_tile_pkey, tile_pkey) ( top_graph_node_pkey, bottom_graph_node_pkey, left_graph_node_pkey, right_graph_node_pkey ) = result", "wire_in_tile WHERE name LIKE \"CLK_BUFG_REBUF_R_CK_GCLK%_BOT\" OR name LIKE \"CLK_BUFG_REBUF_R_CK_GCLK%_TOP\" ),", "[None] def maybe_get_clk_hrow(i): \"\"\" Returns a name of CLK_HROW tile", "= cur.fetchone()[0] if connection_box_wire_pkey is not None: cur.execute( \"\"\" SELECT", "in cur: if node_pkey not in REBUF_NODES: REBUF_NODES[node_pkey] = []", "AND phy_tile_type_pkey IN ( SELECT tile_type_pkey FROM phy_tile WHERE pkey", "def populate_bufg_rebuf_map(conn): global REBUF_NODES REBUF_NODES = {} global REBUF_SOURCES REBUF_SOURCES", "not None, (tile_type, pin_name) node_mapping[right_graph_node_pkey] = node.id update_connection_box( conn, graph,", "elif 'R' in x_loc_str: x_loc = 1 else: assert False,", "x_max + 1, y_max + 1 def find_constant_network(graph): \"\"\" Find", "{} for i, tile_name in enumerate(rebuf_and_hrow_tiles): if tile_name is not", "# Append None on both ends of the list to", "# The difference is that the TOP IOI_SING tile shares", "1 def find_constant_network(graph): \"\"\" Find VCC and GND tiles and", ") = cur.fetchone() elif pin['port_type'] == 'GND': cur.execute('SELECT 
def get_clk_hrow_and_rebuf_tiles_sorted(cur):
    """
    Finds all CLK_HROW_TOP_R, CLK_HROW_BOT_R and REBUF tiles.
    Returns them in a list sorted according to their Y coordinates.
    """
    cur.execute(
        """
SELECT name FROM phy_tile
WHERE
    name LIKE "CLK_HROW_BOT_R_%"
OR
    name LIKE "CLK_HROW_TOP_R_%"
OR
    name LIKE "CLK_BUFG_REBUF_%"
ORDER BY grid_y DESC;
        """
    )

    return [t[0] for t in cur.fetchall()]


def populate_bufg_rebuf_map(conn):
    global REBUF_NODES
    REBUF_NODES = {}

    global REBUF_SOURCES
    REBUF_SOURCES = {}

    rebuf_wire_regexp = re.compile(
        'CLK_BUFG_REBUF_R_CK_GCLK([0-9]+)_(BOT|TOP)'
    )

    cur = conn.cursor()

    # Find CLK_HROW_TOP_R, CLK_HROW_BOT_R and REBUF tiles.
    rebuf_and_hrow_tiles = get_clk_hrow_and_rebuf_tiles_sorted(cur)
    # Append None on both ends of the list to simplify the code below.
    rebuf_and_hrow_tiles = [None] + rebuf_and_hrow_tiles + [None]

    def maybe_get_clk_hrow(i):
        """
        Returns the name of a CLK_HROW tile only if it is there.
        """
        tile = rebuf_and_hrow_tiles[i]
        if tile is not None and tile.startswith("CLK_HROW"):
            return tile
        return None

    # Assign each REBUF tile its above and below CLK_HROW tile. Note that
    # in VPR coordinate terms, "above" and "below" mean the opposite...
    rebuf_to_hrow_map = {}
    for i, tile_name in enumerate(rebuf_and_hrow_tiles):
        if tile_name is not None and tile_name.startswith("CLK_BUFG_REBUF"):
            rebuf_to_hrow_map[tile_name] = {
                "above": maybe_get_clk_hrow(i - 1),
                "below": maybe_get_clk_hrow(i + 1),
            }

    # Find nodes touching rebuf wires.
    cur.execute(
        """
WITH rebuf_wires(wire_in_tile_pkey) AS (
  SELECT pkey
  FROM wire_in_tile
  WHERE
    name LIKE "CLK_BUFG_REBUF_R_CK_GCLK%_BOT"
  OR
    name LIKE "CLK_BUFG_REBUF_R_CK_GCLK%_TOP"
),
rebuf_nodes(node_pkey) AS (
  SELECT DISTINCT node_pkey
  FROM wire
  WHERE wire_in_tile_pkey IN (SELECT wire_in_tile_pkey FROM rebuf_wires)
)
SELECT rebuf_nodes.node_pkey, phy_tile.name, wire_in_tile.name
FROM rebuf_nodes
INNER JOIN wire ON wire.node_pkey = rebuf_nodes.node_pkey
INNER JOIN wire_in_tile ON wire_in_tile.pkey = wire.wire_in_tile_pkey
INNER JOIN phy_tile ON phy_tile.pkey = wire.phy_tile_pkey
WHERE wire.wire_in_tile_pkey IN (SELECT wire_in_tile_pkey FROM rebuf_wires)
ORDER BY rebuf_nodes.node_pkey;"""
    )
    for node_pkey, rebuf_tile, rebuf_wire_name in cur:
        if node_pkey not in REBUF_NODES:
            REBUF_NODES[node_pkey] = []

        m = rebuf_wire_regexp.fullmatch(rebuf_wire_name)

        if m.group(2) == 'TOP':
            REBUF_NODES[node_pkey].append(
                '{}.GCLK{}_ENABLE_BELOW'.format(rebuf_tile, m.group(1))
            )

            hrow_tile = rebuf_to_hrow_map[rebuf_tile]["below"]
            if hrow_tile is not None:
                REBUF_NODES[node_pkey].append(
                    "{}.CLK_HROW_R_CK_GCLK{}_ACTIVE".format(
                        hrow_tile, m.group(1)
                    )
                )
        elif m.group(2) == 'BOT':
            REBUF_NODES[node_pkey].append(
                '{}.GCLK{}_ENABLE_ABOVE'.format(rebuf_tile, m.group(1))
            )

            hrow_tile = rebuf_to_hrow_map[rebuf_tile]["above"]
            if hrow_tile is not None:
                REBUF_NODES[node_pkey].append(
                    "{}.CLK_HROW_R_CK_GCLK{}_ACTIVE".format(
                        hrow_tile, m.group(1)
                    )
                )
        else:
            assert False, (rebuf_tile, rebuf_wire_name)

    for node_pkey in REBUF_NODES:
        cur.execute(
            """
SELECT phy_tile.name, wire_in_tile.name
FROM wire
INNER JOIN phy_tile ON phy_tile.pkey = wire.phy_tile_pkey
INNER JOIN wire_in_tile ON wire_in_tile.pkey = wire.wire_in_tile_pkey
WHERE wire.node_pkey = ?;""", (node_pkey, )
        )

        for tile, wire_name in cur:
            REBUF_SOURCES[(tile, wire_name)] = node_pkey
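# Illustrative shape of the lookup tables built above (tile names are
# hypothetical): REBUF_SOURCES maps a (tile, wire) pair to the node pkey
# whose extra fasm features are kept in REBUF_NODES, e.g.
#
#   REBUF_SOURCES[('CLK_BUFG_REBUF_X9Y99', 'CLK_BUFG_REBUF_R_CK_GCLK0_TOP')]
#       -> node_pkey
#   REBUF_NODES[node_pkey]
#       -> ['CLK_BUFG_REBUF_X9Y99.GCLK0_ENABLE_BELOW',
#           'CLK_HROW_BOT_R_X9Y130.CLK_HROW_R_CK_GCLK0_ACTIVE']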
HCLK_CMT_TILES = {}


def populate_hclk_cmt_tiles(db):
    global HCLK_CMT_TILES
    HCLK_CMT_TILES = {}

    grid = db.grid()
    _, x_max, _, _ = grid.dims()

    for tile in grid.tiles():
        gridinfo = grid.gridinfo_at_tilename(tile)

        if gridinfo.tile_type not in ['CLK_HROW_BOT_R', 'CLK_HROW_TOP_R']:
            continue

        hclk_x, hclk_y = grid.loc_of_tilename(tile)

        hclk_cmt_x = hclk_x
        hclk_cmt_y = hclk_y

        while hclk_cmt_x > 0:
            hclk_cmt_x -= 1
            gridinfo = grid.gridinfo_at_loc((hclk_cmt_x, hclk_cmt_y))

            if gridinfo.tile_type == 'HCLK_CMT':
                HCLK_CMT_TILES[tile, 'L'] = grid.tilename_at_loc(
                    (hclk_cmt_x, hclk_cmt_y)
                )
                break

        hclk_cmt_x = hclk_x

        while hclk_cmt_x < x_max:
            hclk_cmt_x += 1
            gridinfo = grid.gridinfo_at_loc((hclk_cmt_x, hclk_cmt_y))

            if gridinfo.tile_type == 'HCLK_CMT_L':
                HCLK_CMT_TILES[tile, 'R'] = grid.tilename_at_loc(
                    (hclk_cmt_x, hclk_cmt_y)
                )
                break


def find_hclk_cmt_hclk_feature(hclk_tile, lr, hclk_number):
    if (hclk_tile, lr) not in HCLK_CMT_TILES:
        return []

    hclk_cmt_tile = HCLK_CMT_TILES[(hclk_tile, lr)]

    return ['{}.HCLK_CMT_CK_BUFHCLK{}_USED'.format(hclk_cmt_tile, hclk_number)]
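# Illustrative only (names hypothetical): for a CLK_HROW tile that has an
# HCLK_CMT neighbour recorded under the 'L' key, a call such as
#
#   find_hclk_cmt_hclk_feature('CLK_HROW_BOT_R_X9Y130', 'L', 3)
#
# returns a single-element list like
# ['<hclk_cmt_tile>.HCLK_CMT_CK_BUFHCLK3_USED'], and an empty list when the
# (tile, lr) pair is unknown.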
\"\"\" import argparse import os.path from hilbertcurve.hilbertcurve import", "connection_box_map = {} for wire_in_tile_pkey, tile_type_pkey, wire_name in cur: connection_box_map[wire_in_tile_pkey]", "channels_obj=channels_obj, connection_box_obj=connection_box_obj, num_nodes=len(capnp_graph.graph.nodes), nodes_obj=yield_nodes(capnp_graph.graph.nodes), num_edges=num_edges, edges_obj=import_graph_edges(conn, graph, node_mapping), node_remap=node_remap, )", "get_clk_hrow_and_rebuf_tiles_sorted(cur) # Append None on both ends of the list", "cur2.execute( \"\"\" SELECT name FROM segment WHERE pkey = (", "internal_capacitance, drive_resistance, intrinsic_delay, penalty_cost, \\ switch_type in cur.execute(\"\"\" SELECT name,", "(SELECT wire_in_tile_pkey FROM rebuf_wires) ) SELECT rebuf_nodes.node_pkey, phy_tile.name, wire_in_tile.name FROM", "INNER JOIN wire_in_tile ON wire_in_tile.pkey = wire.wire_in_tile_pkey INNER JOIN phy_tile", "return ' '.join(features) m = HCLK_OUT.fullmatch(feature_path[-1]) if m: return '", "container VPR schema files', ) print('{} Starting routing import'.format(now())) args", "return tiles[(x, y)] return get_tile_and_site_as_tile_pkey def create_get_site_as_tile_wire(cur): @functools.lru_cache(maxsize=0) def get_site_from_site_as_tile(site_as_tile_pkey):", "= grid.tilename_at_loc( (hclk_cmt_x, hclk_cmt_y) ) break def find_hclk_cmt_hclk_feature(hclk_tile, lr, hclk_number):", "in nodes: x = node.loc.x_low y = node.loc.y_low if (x,", "{} global REBUF_SOURCES REBUF_SOURCES = {} rebuf_wire_regexp = re.compile( 'CLK_BUFG_REBUF_R_CK_GCLK([0-9]+)_(BOT|TOP)'", "'VCC': tile_type = 'SYN-VCC' wire = 'VCC' elif pin['port_type'] ==", "grid = db.grid() _, x_max, _, _ = grid.dims() for", "'wb') as f: pickle.dump(node_mapping, f) print('{} Done writing node map.'.format(now()))", "in ['input', 'output']: wire_pkey = get_wire_pkey(conn, tile_name, pin['wire']) cur.execute( \"\"\"", "but in the prjxray db it # is a pip.", "'outpad' elif pin['port_type'] == 'output': tile_type = synth_tile['tile_name'] wire =", "'{}.IOI_OCLKM_{}.{}'.format( feature_path[0], m.group(1), feature_path[-1] ) return ' '.join((feature, enable_oclkm_feature)) if", "if pin['port_type'] == 'input': graph.add_edge( src_node=node_mapping[track_node], sink_node=pin_node[0][0], switch_id=delayless_switch, name='synth_{}_{}'.format(tile_name, pin['wire']),", "info)) cur.execute('SELECT idx, info FROM y_list;') y_list = [] for", "y_max=y_max, x_list=x_list, y_list=y_list, ) def create_connection_boxes(conn, graph): \"\"\" Assign connection", "for node_pkey in REBUF_NODES: cur.execute( \"\"\" SELECT phy_tile.name, wire_in_tile.name FROM", "= cur.fetchone() cur.execute( \"SELECT wire_in_tile_pkey FROM wire WHERE pkey =", "connection_box_map ) elif side == tracks.Direction.TOP: assert top_graph_node_pkey is not", "m.group(1) ) ) elif m.group(2) == 'BOT': REBUF_NODES[node_pkey].append( '{}.GCLK{}_ENABLE_ABOVE'.format(rebuf_tile, m.group(1))", "} return synth_tiles def create_node_remap(nodes, channels_obj): N = 2 p", "= ? AND phy_tile_type_pkey IN ( SELECT tile_type_pkey FROM phy_tile", "and IOB33S sites. 
# They are used in differential input", "(wire_in_tile_pkey, tile_pkey) ) result = cur.fetchone() assert result is not", "track_nodes[option[0]] assert track_node in node_mapping, (track_node, track_pkey) if wire ==", "% 1024 == 0: bar.update(idx) def phy_grid_dims(conn): \"\"\" Returns physical", "and \\ feature_path[1] == \"IOB_DIFFO_IN1\": return '{}.OUT_DIFF'.format(feature_path[0]) # IOB_PADOUT0->IOB_DIFFI_IN1 #", "INNER JOIN phy_tile ON phy_tile.pkey = wire.phy_tile_pkey INNER JOIN wire_in_tile", "y2=y_max, ) else: use_roi = False roi = None synth_tiles", "wire ON wire.node_pkey = rebuf_nodes.node_pkey INNER JOIN wire_in_tile ON wire_in_tile.pkey", "VCC and GND tiles and create synth_tiles input. All arches", "m.group(1), feature_path[-1] ) return ' '.join((feature, enable_oclkm_feature)) if HCLK_CK_BUFHCLK_REGEX.fullmatch(feature_path[-1]): enable_buffer_feature", "\"above\": maybe_get_clk_hrow(i - 1), \"below\": maybe_get_clk_hrow(i + 1), } #", "node_type canonical_loc = None cur2.execute( \"\"\" SELECT grid_x, grid_y FROM", ") elif wire == 'outpad' and num_outpad > 1: pin_name", "node.loc.y_low ) if site_as_tile_pkey is not None: wire_in_tile_pkey = get_site_as_tile_wire(site_as_tile_pkey,", "be removed once # https://github.com/verilog-to-routing/vtr-verilog-to-routing/issues/354 # is fixed. try: graph.get_switch_id(name)", "grid_x, grid_y, box_id, site_pin_delay ) def create_get_tile_and_site_as_tile_pkey(cur): tiles = {}", "'wire': 'GND', 'pad': 'GND', 'port_type': 'GND', 'is_clock': False, }, ],", "assert False, (rebuf_tile, rebuf_wire_name) for node_pkey in REBUF_NODES: cur.execute( \"\"\"", "its above and below CLK_HROW tile. Note that in #", "prjxray-arch-import' ) parser.add_argument( '--graph_limit', help='Limit grid to specified dimensions in", "is not None and tile_name.startswith(\"CLK_BUFG_REBUF\"): rebuf_to_hrow_map[tile_name] = { \"above\": maybe_get_clk_hrow(i", "pkey = ?;\"\"\", (dest_wire_in_tile_pkey, ) ) (dest_net, ) = cur.fetchone()", "rebuf_and_hrow_tiles = [None] + rebuf_and_hrow_tiles + [None] def maybe_get_clk_hrow(i): \"\"\"", "phy_tile ORDER BY grid_y DESC LIMIT 1;\") y_max = cur.fetchone()[0]", "each row in the graph_edge table. Import channel XML node", "False, }, ], }, } } return synth_tiles def create_node_remap(nodes,", "based on which IOI_SING is selected (top or bottom) #", "FROM wire WHERE wire_in_tile_pkey = ? AND tile_pkey = ?;\"\"\",", "node_mapping, (track_node, track_pkey) if wire == 'inpad' and num_inpad >", "IOI tile, while the BOTTOM IOI_SING # shares bits with", "idx idx += 1 del point_map[coord] return lambda x: id_map[x]", "in enumerate(graph.nodes): if node.type not in (graph2.NodeType.IPIN, graph2.NodeType.OPIN): continue gridloc", "simply reject fasm # features here. if feature_path[2] == \"IOB_PADOUT0\"", "'--write_rr_node_map', required=True, help='Output map of graph_node_pkey to rr inode file'", "PIP is active the IOB operates in the differential output", "need, as the IOI_SING tiles have the same wire, and", "right_graph_node_pkey FROM wire WHERE wire_in_tile_pkey = ? 
AND tile_pkey =", "= cur2.fetchone() if result is not None: segment_name = result[0]", "x_low, x_high, y_low, y_high, ptc, capacitance, resistance) in progressbar_utils.progressbar(cur.execute(\"\"\" SELECT", "= graph.pin_ptc_to_name_map[ (gridloc.block_type_id, node.loc.ptc)] # Synthetic blocks are handled below.", "is not None: tile_name = get_tile_name(phy_tile_pkey) src_net, dest_net = get_pip_wire_names(pip_pkey)", "print('{} Adding synthetic edges'.format(now())) add_synthetic_edges(conn, graph, node_mapping, grid, synth_tiles) print('{}", "def yield_nodes(nodes): with progressbar_utils.ProgressBar(max_value=len(nodes)) as bar: for idx, node in", "top_graph_node_pkey, bottom_graph_node_pkey, left_graph_node_pkey, right_graph_node_pkey FROM wire WHERE wire_in_tile_pkey = ?", "and create synth_tiles input. All arches should have these synthetic", "therefore, # when the relative pip is traversed, the correct", "the CE signal to constant 1 m = CLK_HROW_CK_MUX_REGEX.fullmatch(feature_path[-1]) if", "argparse import os.path from hilbertcurve.hilbertcurve import HilbertCurve import math import", "pkey = ? );\"\"\", (wire_pkey, ) ) (track_pkey, ) =", "= cur.fetchone() else: assert False, pin['port_type'] tracks_model, track_nodes = get_track_model(conn,", "intrinsic_delay, penalty_cost, switch_type FROM switch;\"\"\"): # Add back missing switchs,", "( SELECT site_pin_switch_pkey FROM wire_in_tile WHERE pkey = ( SELECT", "track graph nodes and add them. print('{} Creating tracks'.format(now())) segment_id", ") ) (switch_name, ) = cur.fetchone() switch_id = graph.get_switch_id(switch_name) switch_name_map[switch_pkey]", "0: hclk_cmt_x -= 1 gridinfo = grid.gridinfo_at_loc((hclk_cmt_x, hclk_cmt_y)) if gridinfo.tile_type", "create_track_rr_graph( conn, graph, node_mapping, use_roi, roi, synth_tiles, segment_id ): cur", "# CLBLL_L.CLBLL_LL_A1[0] -> (CLBLL_L, CLBLL_LL_A1) PIN_NAME_TO_PARTS = re.compile(r'^([^\\.]+)\\.([^\\]]+)\\[0\\]$') def set_connection_box(", "'IMUX' >>> reduce_connection_box('A_L10') 'A' >>> reduce_connection_box('B') 'B' >>> reduce_connection_box('B_L') 'B'", "sink_node=pin_node[0][0], switch_id=delayless_switch, name='synth_{}_{}'.format(tile_name, pin['wire']), ) elif pin['port_type'] in ['VCC', 'GND',", "if side == tracks.Direction.LEFT: assert left_graph_node_pkey is not None, (tile_type,", "number of connection boxes by merging some. Examples: >>> reduce_connection_box('IMUX0')", "= 'outpad' elif pin['port_type'] == 'output': tile_type = synth_tile['tile_name'] wire", "os.path from hilbertcurve.hilbertcurve import HilbertCurve import math import prjxray.db from", "is a pip. Instead of making it a pseudo-pip we", "node.id update_connection_box( conn, graph, bottom_graph_node_pkey, node_idx, connection_box_map ) else: assert", "'outpad' and num_outpad > 1: pin_name = graph.create_pin_name_from_tile_type_sub_tile_num_and_pin( tile_type, (pin['z_loc']", "continue m = PIN_NAME_TO_PARTS.match(pin_name) assert m is not None, pin_name", "\"below\" mean the opposite... 
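# Worked example, taken from the docstring above (the tile prefix is
# hypothetical): a pip feature whose last path element names an HCLK buffer
# is returned together with its implied ENABLE_BUFFER feature:
#
#   check_feature('TILE.HCLK_LEAF_CLK_B_BOTL0.HCLK_CK_BUFHCLK10')
#   -> 'TILE.HCLK_LEAF_CLK_B_BOTL0.HCLK_CK_BUFHCLK10 '
#      'TILE.ENABLE_BUFFER.HCLK_CK_BUFHCLK10'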
def set_connection_box(
        graph, node_idx, grid_x, grid_y, box_id, site_pin_delay
):
    """ Assign a connection box to an IPIN node. """
    node_dict = graph.nodes[node_idx]._asdict()
    node_dict['connection_box'] = graph2.ConnectionBox(
        x=grid_x,
        y=grid_y,
        id=box_id,
        site_pin_delay=site_pin_delay,
    )
    graph.nodes[node_idx] = graph2.Node(**node_dict)


def update_connection_box(
        conn, graph, graph_node_pkey, node_idx, connection_box_map
):
    """ Update connection box of IPIN node if needed. """
    cur = conn.cursor()

    cur.execute(
        "SELECT connection_box_wire_pkey FROM graph_node WHERE pkey = ?;",
        (graph_node_pkey, )
    )
    connection_box_wire_pkey = cur.fetchone()[0]

    if connection_box_wire_pkey is not None:
        cur.execute(
            """
SELECT grid_x, grid_y FROM phy_tile WHERE pkey = (
    SELECT phy_tile_pkey FROM wire WHERE pkey = ?
)""", (connection_box_wire_pkey, )
        )
        grid_x, grid_y = cur.fetchone()

        cur.execute(
            "SELECT wire_in_tile_pkey FROM wire WHERE pkey = ?",
            (connection_box_wire_pkey, )
        )
        wire_in_tile_pkey = cur.fetchone()[0]

        box_id = connection_box_map[wire_in_tile_pkey]

        cur.execute(
            """
SELECT switch.intrinsic_delay FROM switch WHERE pkey = (
    SELECT site_pin_switch_pkey FROM wire_in_tile WHERE pkey = (
        SELECT wire_in_tile_pkey FROM wire WHERE pkey = (
            SELECT site_wire_pkey FROM node WHERE pkey = (
                SELECT node_pkey FROM graph_node WHERE pkey = ?
            )
        )
    )
)""", (graph_node_pkey, )
        )
        site_pin_delay = cur.fetchone()[0]

        set_connection_box(
            graph, node_idx, grid_x, grid_y, box_id, site_pin_delay
        )
def create_get_tile_and_site_as_tile_pkey(cur):
    tiles = {}
    for tile_pkey, site_as_tile_pkey, grid_x, grid_y in cur.execute("""
SELECT pkey, site_as_tile_pkey, grid_x, grid_y FROM tile;"""):
        tiles[(grid_x, grid_y)] = (tile_pkey, site_as_tile_pkey)

    def get_tile_and_site_as_tile_pkey(x, y):
        return tiles[(x, y)]

    return get_tile_and_site_as_tile_pkey


def create_get_site_as_tile_wire(cur):
    @functools.lru_cache(maxsize=0)
    def get_site_from_site_as_tile(site_as_tile_pkey):
        cur.execute(
            """
SELECT site.site_type_pkey, site_as_tile.site_pkey
FROM site_as_tile
INNER JOIN site ON site.pkey = site_as_tile.site_pkey
WHERE site_as_tile.pkey = ?""", (site_as_tile_pkey, )
        )

        results = cur.fetchall()
        assert len(results) == 1, site_as_tile_pkey
        return results[0]

    @functools.lru_cache(maxsize=0)
    def get_site_as_tile_wire(site_as_tile_pkey, pin):
        site_type_pkey, site_pkey = get_site_from_site_as_tile(
            site_as_tile_pkey
        )
        cur.execute(
            """
SELECT pkey FROM wire_in_tile
WHERE
  site_pin_pkey = (
    SELECT pkey FROM site_pin
    WHERE
      site_type_pkey = ?
      AND name = ?
  )
  AND site_pkey = ?
;""", (site_type_pkey, pin, site_pkey)
        )

        results = cur.fetchall()
        assert len(results) == 1
        wire_in_tile_pkey = results[0][0]

        return wire_in_tile_pkey

    return get_site_as_tile_wire
)\"\"\",", "\"CLK_HROW_TOP_R_%\" OR name LIKE \"CLK_BUFG_REBUF_%\" ORDER BY grid_y DESC; \"\"\"", ">>> reduce_connection_box('BRAM_ADDR') 'IMUX' >>> reduce_connection_box('A_L10') 'A' >>> reduce_connection_box('B') 'B' >>>", "phy_tile WHERE name LIKE \"CLK_HROW_BOT_R_%\" OR name LIKE \"CLK_HROW_TOP_R_%\" OR", "x_loc_str: x_loc = 0 elif 'R' in x_loc_str: x_loc =", "# Find CLK_HROW_TOP_R, CLK_HROW_TOP_R and REBUF tiles. rebuf_and_hrow_tiles = get_clk_hrow_and_rebuf_tiles_sorted(cur)", "been sent to # VPR. VPR cannot handle duplicate paths", "db.grid() _, x_max, _, _ = grid.dims() for tile in", "wire_feature = feature_when_routed(parts[1]) if wire_feature is not None: return '{}", "= result[0] segment_id = graph.get_segment_id_from_name(segment_name) else: segment_id = default_segment_id node_type", "direction=direction, x_low=x_low, x_high=x_high, y_low=y_low, y_high=y_high, ) assert graph_node_pkey not in", "x2=x_max, y2=y_max, ) else: use_roi = False roi = None", "an ROI, synthetic tile defintion from prjxray-arch-import' ) parser.add_argument( '--graph_limit',", "1 m = CLK_HROW_CK_MUX_REGEX.fullmatch(feature_path[-1]) if m: x_loc_str = m.group(1) if", "nodes_obj=yield_nodes(capnp_graph.graph.nodes), num_edges=num_edges, edges_obj=import_graph_edges(conn, graph, node_mapping), node_remap=node_remap, ) for k in", "'rr_graph_uxsdcxx.capnp' ), input_file_name=args.read_rr_graph, progressbar=progressbar_utils.progressbar, output_file_name=args.write_rr_graph, ) graph = capnp_graph.graph if", "\"\"\" Returns a name of CLK_HROW tile only if its", "the TOP IOI_SING tile shares bits with # the bottom", "features to be enabled. Some pips imply other features. Example:", "pkey FROM site_pin WHERE site_type_pkey = ? AND name =", "all CLK_HROW_TOP_R, CLK_HROW_BOT_T and REBUF tiles. returns them in a", "create_connection_boxes(conn, graph) # Match site pins rr nodes with graph_node's", "site_pin_delay=site_pin_delay, ) graph.nodes[node_idx] = graph2.Node(**node_dict) def update_connection_box( conn, graph, graph_node_pkey,", "switch_id) tuples that pip edges have been sent to #", "wire INNER JOIN phy_tile ON phy_tile.pkey = wire.phy_tile_pkey INNER JOIN", "True with open(args.synth_tiles) as f: synth_tiles = json.load(f) roi =", "site_pkey) ) results = cur.fetchall() assert len(results) == 1 wire_in_tile_pkey", "progressbar_utils.ProgressBar(max_value=num_edges) as bar: for idx, (src_graph_node, dest_graph_node, switch_pkey, phy_tile_pkey, pip_pkey,", "= (tile_pkey, site_as_tile_pkey) def get_tile_and_site_as_tile_pkey(x, y): return tiles[(x, y)] return", "continue if pip_pkey is not None: tile_name = get_tile_name(phy_tile_pkey) src_net,", "routing import'.format(now())) args = parser.parse_args() db = prjxray.db.Database(args.db_root, args.part) populate_hclk_cmt_tiles(db)", "= (feature_path[0], feature_path[1]) if rebuf_key in REBUF_SOURCES: return ' '.join([feature]", "edge.switch_id, None) # Then yield edges from database. cur =", "'GND', 'output']: graph.add_edge( src_node=pin_node[0][0], sink_node=node_mapping[track_node], switch_id=delayless_switch, name='synth_{}_{}'.format(tile_name, pin['wire']), ) else:", "shares bits with # the bottom half of a normal", "tracks'.format(now())) import_tracks(conn, alive_tracks, node_mapping, graph, segment_id) print('original {} final {}'.format(num_channels,", "common with the IOI tiles. 
def import_tracks(conn, alive_tracks, node_mapping, graph, default_segment_id):
    cur = conn.cursor()
    cur2 = conn.cursor()
    for (graph_node_pkey, track_pkey, graph_node_type, x_low, x_high, y_low,
         y_high, ptc, capacitance,
         resistance) in progressbar_utils.progressbar(cur.execute("""
SELECT
  pkey, track_pkey, graph_node_type, x_low, x_high, y_low, y_high, ptc,
  capacitance, resistance
FROM graph_node WHERE track_pkey IS NOT NULL;""")):
        if track_pkey not in alive_tracks:
            continue

        cur2.execute(
            """
SELECT name FROM segment WHERE pkey = (
    SELECT segment_pkey FROM track WHERE pkey = ?
)""", (track_pkey, )
        )
        result = cur2.fetchone()
        if result is not None:
            segment_name = result[0]
            segment_id = graph.get_segment_id_from_name(segment_name)
        else:
            segment_id = default_segment_id

        node_type = graph2.NodeType(graph_node_type)

        if node_type == graph2.NodeType.CHANX:
            direction = 'X'
            x_low = max(x_low, 1)
        elif node_type == graph2.NodeType.CHANY:
            direction = 'Y'
            y_low = max(y_low, 1)
        else:
            assert False, node_type

        canonical_loc = None
        cur2.execute(
            """
SELECT grid_x, grid_y FROM phy_tile WHERE pkey = (
    SELECT canon_phy_tile_pkey FROM track WHERE pkey = ?
)""", (track_pkey, )
        )
        result = cur2.fetchone()
        if result:
            canonical_loc = graph2.CanonicalLoc(x=result[0], y=result[1])

        track = tracks.Track(
            direction=direction,
            x_low=x_low,
            x_high=x_high,
            y_low=y_low,
            y_high=y_high,
        )
        assert graph_node_pkey not in node_mapping
        node_mapping[graph_node_pkey] = graph.add_track(
            track=track,
            segment_id=segment_id,
            ptc=ptc,
            timing=graph2.NodeTiming(
                r=resistance,
                c=capacitance,
            ),
            canonical_loc=canonical_loc
        )
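# Sketch of the clamping above (values hypothetical): a CHANX node whose
# x_low is 0 is pulled in to x_low=1, presumably because routing channels
# start at coordinate 1 in VPR:
#
#   track = tracks.Track(direction='X', x_low=1, x_high=4, y_low=2, y_high=2)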
def create_track_rr_graph(
        conn, graph, node_mapping, use_roi, roi, synth_tiles, segment_id
):
    cur = conn.cursor()
    cur.execute("""SELECT count(*) FROM track;""")
    (num_channels, ) = cur.fetchone()

    print('{} Import alive tracks'.format(now()))
    alive_tracks = set()
    for (track_pkey,
         ) in cur.execute("SELECT pkey FROM track WHERE alive = 1;"):
        alive_tracks.add(track_pkey)

    print('{} Importing alive tracks'.format(now()))
    import_tracks(conn, alive_tracks, node_mapping, graph, segment_id)

    print('original {} final {}'.format(num_channels, len(alive_tracks)))


def add_synthetic_edges(conn, graph, node_mapping, grid, synth_tiles):
    cur = conn.cursor()
    delayless_switch = graph.get_switch_id('__vpr_delayless_switch__')

    for tile_name, synth_tile in synth_tiles['tiles'].items():
        num_inpad = len(
            list(
                filter(
                    lambda t: t['port_type'] == 'output', synth_tile['pins']
                )
            )
        )
        num_outpad = len(
            list(
                filter(
                    lambda t: t['port_type'] == 'input', synth_tile['pins']
                )
            )
        )
        for pin in synth_tile['pins']:
            if pin['port_type'] in ['input', 'output']:
                wire_pkey = get_wire_pkey(conn, tile_name, pin['wire'])
                cur.execute(
                    """
SELECT track_pkey FROM node WHERE pkey = (
    SELECT node_pkey FROM wire WHERE pkey = ?
);""", (wire_pkey, )
                )
                (track_pkey, ) = cur.fetchone()
                assert track_pkey is not None, (
                    tile_name, pin['wire'], wire_pkey
                )
            elif pin['port_type'] == 'VCC':
                cur.execute('SELECT vcc_track_pkey FROM constant_sources')
                (track_pkey, ) = cur.fetchone()
            elif pin['port_type'] == 'GND':
                cur.execute('SELECT gnd_track_pkey FROM constant_sources')
                (track_pkey, ) = cur.fetchone()
            else:
                assert False, pin['port_type']

            tracks_model, track_nodes = get_track_model(conn, track_pkey)

            option = list(
                tracks_model.get_tracks_for_wire_at_coord(
                    tuple(synth_tile['loc'])
                ).values()
            )
            assert len(option) > 0, (pin, len(option))

            if pin['port_type'] == 'input':
                tile_type = synth_tile['tile_name']
                wire = 'outpad'
            elif pin['port_type'] == 'output':
                tile_type = synth_tile['tile_name']
                wire = 'inpad'
            elif pin['port_type'] == 'VCC':
                tile_type = 'SYN-VCC'
                wire = 'VCC'
            elif pin['port_type'] == 'GND':
                tile_type = 'SYN-GND'
                wire = 'GND'
            else:
                assert False, pin

            track_node = track_nodes[option[0]]
            assert track_node in node_mapping, (track_node, track_pkey)

            if wire == 'inpad' and num_inpad > 1:
                pin_name = graph.create_pin_name_from_tile_type_sub_tile_num_and_pin(
                    tile_type, pin['z_loc'], wire
                )
            elif wire == 'outpad' and num_outpad > 1:
                pin_name = graph.create_pin_name_from_tile_type_sub_tile_num_and_pin(
                    tile_type, (pin['z_loc'] - num_inpad), wire
                )
            else:
                pin_name = graph.create_pin_name_from_tile_type_and_pin(
                    tile_type, wire
                )

            pin_node = graph.get_nodes_for_pin(
                tuple(synth_tile['loc']), pin_name
            )

            if pin['port_type'] == 'input':
                graph.add_edge(
                    src_node=node_mapping[track_node],
                    sink_node=pin_node[0][0],
                    switch_id=delayless_switch,
                    name='synth_{}_{}'.format(tile_name, pin['wire']),
                )
            elif pin['port_type'] in ['VCC', 'GND', 'output']:
                graph.add_edge(
                    src_node=pin_node[0][0],
                    sink_node=node_mapping[track_node],
                    switch_id=delayless_switch,
                    name='synth_{}_{}'.format(tile_name, pin['wire']),
                )
            else:
                assert False, pin
def get_switch_name(conn, graph, switch_name_map, switch_pkey):
    assert switch_pkey is not None
    if switch_pkey not in switch_name_map:
        cur = conn.cursor()
        cur.execute(
            """SELECT name FROM switch WHERE pkey = ?;""", (switch_pkey, )
        )
        (switch_name, ) = cur.fetchone()
        switch_id = graph.get_switch_id(switch_name)
        switch_name_map[switch_pkey] = switch_id
    else:
        switch_id = switch_name_map[switch_pkey]

    return switch_id


def create_get_tile_name(conn):
    cur = conn.cursor()

    @functools.lru_cache(maxsize=None)
    def get_tile_name(tile_pkey):
        cur.execute(
            """
SELECT name FROM phy_tile WHERE pkey = ?;
            """, (tile_pkey, )
        )
        return cur.fetchone()[0]

    return get_tile_name


def create_get_pip_wire_names(conn):
    cur = conn.cursor()

    @functools.lru_cache(maxsize=None)
    def get_pip_wire_names(pip_pkey):
        cur.execute(
            """SELECT src_wire_in_tile_pkey, dest_wire_in_tile_pkey
               FROM pip_in_tile WHERE pkey = ?;""", (pip_pkey, )
        )
        src_wire_in_tile_pkey, dest_wire_in_tile_pkey = cur.fetchone()

        cur.execute(
            """SELECT name FROM wire_in_tile WHERE pkey = ?;""",
            (src_wire_in_tile_pkey, )
        )
        (src_net, ) = cur.fetchone()

        cur.execute(
            """SELECT name FROM wire_in_tile WHERE pkey = ?;""",
            (dest_wire_in_tile_pkey, )
        )
        (dest_net, ) = cur.fetchone()

        return (src_net, dest_net)

    return get_pip_wire_names
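# Both factories above close over one cursor and memoize results with
# functools.lru_cache, so the edge import below hits the database only once
# per distinct pkey. A hypothetical call pattern (mirroring their use in
# import_graph_edges):
#
#   get_tile_name = create_get_tile_name(conn)
#   tile_name = get_tile_name(phy_tile_pkey)  # cached after the first call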
\"\"\" block_types", "), ) ) # Mapping of graph_node.pkey to rr node", "features.append( '{}.{}_ACTIVE'.format(feature_path[0], feature_path[-1]) ) features.append('{}.{}_USED'.format(feature_path[0], feature_path[-1])) return ' '.join(features) m", "return connection_box_map def yield_nodes(nodes): with progressbar_utils.ProgressBar(max_value=len(nodes)) as bar: for idx,", "pin_name = graph.pin_ptc_to_name_map[ (gridloc.block_type_id, node.loc.ptc)] # Synthetic blocks are handled", "the same wire, and pip names # despite they are", "len(graph.edges) print('{} Counting edges.'.format(now())) cur = conn.cursor() cur.execute(\"SELECT count() FROM", "get_switch_name( conn, graph, switch_name_map, switch_pkey ) src_node = node_mapping[src_graph_node] sink_node", "populate_hclk_cmt_tiles(db) synth_tiles = None if args.synth_tiles: use_roi = True with", ") results = cur.fetchall() assert len(results) == 1 wire_in_tile_pkey =", "rr_node ids in the rr_graph. Add rr_edge for each row", "in range(hilbert_curve.max_h + 1): coord = tuple(hilbert_curve.coordinates_from_distance(h)) if coord not", "+= 1 gridinfo = grid.gridinfo_at_loc((hclk_cmt_x, hclk_cmt_y)) if gridinfo.tile_type == 'HCLK_CMT_L':", "= max(x_low, 1) elif node_type == graph2.NodeType.CHANY: direction = 'Y'", "input. All arches should have these synthetic tiles, search the", "imply other features. Example: .HCLK_LEAF_CLK_B_BOTL0.HCLK_CK_BUFHCLK10 implies: .ENABLE_BUFFER.HCLK_CK_BUFHCLK10 \"\"\" # IOI_SING", "the relative # position of the IOI_SING within the clock", "node.loc.y_low, box_id=graph.maybe_add_connection_box('IMUX'), site_pin_delay=0., ) continue m = PIN_NAME_TO_PARTS.match(pin_name) assert m", "?\", (connection_box_wire_pkey, ) ) wire_in_tile_pkey = cur.fetchone()[0] box_id = connection_box_map[wire_in_tile_pkey]", "same switch id. print('{} Adding synthetic edges'.format(now())) add_synthetic_edges(conn, graph, node_mapping,", "'port_type': 'VCC', 'is_clock': False, }, ], }, \"GND\": { 'loc':", "grid_loc.block_type_id: assert gnd_loc is None gnd_loc = (grid_loc.x, grid_loc.y) if", "wire of the pip # # Example: IOI_OLOGIC0 -> IOI_OLOGIC1", "switch_name_map[switch_pkey] return switch_id def create_get_tile_name(conn): cur = conn.cursor() @functools.lru_cache(maxsize=None) def", "FROM track;\"\"\") (num_channels, ) = cur.fetchone() print('{} Import alive tracks'.format(now()))", "= get_site_from_site_as_tile( site_as_tile_pkey ) cur.execute( \"\"\" SELECT pkey FROM wire_in_tile", "the code below. rebuf_and_hrow_tiles = [None] + rebuf_and_hrow_tiles + [None]", "= re.compile('HCLK_CK_BUFHCLK[0-9]+') CLK_HROW_CK_MUX_REGEX = re.compile('CLK_HROW_CK_MUX_OUT_([LR])([0-9]+)') CASCOUT_REGEX = re.compile('BRAM_CASCOUT_ADDR((?:BWR)|(?:ARD))ADDRU([0-9]+)') CONNECTION_BOX_FILTER =", "\"\"\" Imports 7-series routing fabric to the rr graph. 
def create_channels(conn):
    cur = conn.cursor()

    cur.execute(
        "SELECT chan_width_max, x_min, x_max, y_min, y_max FROM channel;"
    )
    chan_width_max, x_min, x_max, y_min, y_max = cur.fetchone()

    cur.execute('SELECT idx, info FROM x_list;')
    x_list = []
    for idx, info in cur:
        x_list.append(graph2.ChannelList(idx, info))

    cur.execute('SELECT idx, info FROM y_list;')
    y_list = []
    for idx, info in cur:
        y_list.append(graph2.ChannelList(idx, info))

    return graph2.Channels(
        chan_width_max=chan_width_max,
        x_min=x_min,
        y_min=y_min,
        x_max=x_max,
        y_max=y_max,
        x_list=x_list,
        y_list=y_list,
    )


def create_connection_boxes(conn, graph):
    """ Assign connection box ids for all connection box types. """
    cur = conn.cursor()
    cur.execute(
        """
SELECT pkey, tile_type_pkey, name FROM wire_in_tile WHERE pkey IN (
    SELECT DISTINCT wire_in_tile_pkey FROM wire WHERE pkey IN (
        SELECT connection_box_wire_pkey FROM graph_node
        WHERE connection_box_wire_pkey IS NOT NULL
    )
);"""
    )

    connection_box_map = {}
    for wire_in_tile_pkey, tile_type_pkey, wire_name in cur:
        connection_box_map[wire_in_tile_pkey] = graph.maybe_add_connection_box(
            reduce_connection_box(wire_name)
        )

    return connection_box_map


def yield_nodes(nodes):
    with progressbar_utils.ProgressBar(max_value=len(nodes)) as bar:
        for idx, node in enumerate(nodes):
            yield node

            if idx % 1024 == 0:
                bar.update(idx)


def phy_grid_dims(conn):
    """ Returns physical grid dimensions. """
    cur = conn.cursor()
    cur.execute("SELECT grid_x FROM phy_tile ORDER BY grid_x DESC LIMIT 1;")
    x_max = cur.fetchone()[0]
    cur.execute("SELECT grid_y FROM phy_tile ORDER BY grid_y DESC LIMIT 1;")
    y_max = cur.fetchone()[0]

    return x_max + 1, y_max + 1
def find_constant_network(graph):
    """ Find VCC and GND tiles and create synth_tiles input.

    All arches should have these synthetic tiles, search the input rr graph
    for the SYN-GND and SYN-VCC tiles.

    """
    block_types = {}

    for block_type in graph.block_types:
        block_types[block_type.name] = block_type.id

    assert 'SYN-GND' in block_types
    assert 'SYN-VCC' in block_types

    gnd_block_id = block_types['SYN-GND']
    vcc_block_id = block_types['SYN-VCC']

    gnd_loc = None
    vcc_loc = None

    for grid_loc in graph.grid:
        if gnd_block_id == grid_loc.block_type_id:
            assert gnd_loc is None
            gnd_loc = (grid_loc.x, grid_loc.y)

        if vcc_block_id == grid_loc.block_type_id:
            assert vcc_loc is None
            vcc_loc = (grid_loc.x, grid_loc.y)

    assert gnd_loc is not None
    assert vcc_loc is not None

    synth_tiles = {
        'tiles': {
            "VCC": {
                'loc': vcc_loc,
                'pins': [
                    {
                        'wire': 'VCC',
                        'pad': 'VCC',
                        'port_type': 'VCC',
                        'is_clock': False,
                    },
                ],
            },
            "GND": {
                'loc': gnd_loc,
                'pins': [
                    {
                        'wire': 'GND',
                        'pad': 'GND',
                        'port_type': 'GND',
                        'is_clock': False,
                    },
                ],
            },
        }
    }

    return synth_tiles
\"\"\" cur.execute( \"\"\" SELECT name", "info in cur: x_list.append(graph2.ChannelList(idx, info)) cur.execute('SELECT idx, info FROM y_list;')", "for idx, node in enumerate(nodes): yield node if idx %", "parser.add_argument( '--connection_database', help='Database of fabric connectivity', required=True ) parser.add_argument( '--synth_tiles',", "ON phy_tile.pkey = wire.phy_tile_pkey INNER JOIN wire_in_tile ON wire_in_tile.pkey =", ") graph = capnp_graph.graph if synth_tiles is None: synth_tiles =", "= get_tile_and_site_as_tile_pkey( node.loc.x_low, node.loc.y_low ) if site_as_tile_pkey is not None:", "cur.fetchone() print('{} Import alive tracks'.format(now())) alive_tracks = set() for (track_pkey,", "nodes with graph_node's in the connection_database. print('{} Importing graph nodes'.format(now()))", "else: assert False, node_type canonical_loc = None cur2.execute( \"\"\" SELECT", "create_get_site_as_tile_wire(cur): @functools.lru_cache(maxsize=0) def get_site_from_site_as_tile(site_as_tile_pkey): cur.execute( \"\"\" SELECT site.site_type_pkey, site_as_tile.site_pkey FROM", "elif m.group(2) == 'BOT': REBUF_NODES[node_pkey].append( '{}.GCLK{}_ENABLE_ABOVE'.format(rebuf_tile, m.group(1)) ) hrow_tile =", "cur.fetchone()[0] set_connection_box( graph, node_idx, grid_x, grid_y, box_id, site_pin_delay ) def", "parser.add_argument( '--read_rr_graph', required=True, help='Input rr_graph file' ) parser.add_argument( '--write_rr_graph', required=True,", "_ = grid.dims() for tile in grid.tiles(): gridinfo = grid.gridinfo_at_tilename(tile)", "result is not None, (wire_in_tile_pkey, tile_pkey) ( top_graph_node_pkey, bottom_graph_node_pkey, left_graph_node_pkey,", "CHANY from the database. IPIN and OPIN rr_nodes should already", "box to an IPIN node. \"\"\" node_dict = graph.nodes[node_idx]._asdict() node_dict['connection_box']", "= 'SYN-VCC' wire = 'VCC' elif pin['port_type'] == 'GND': tile_type", "break hclk_cmt_x = hclk_x while hclk_cmt_x < x_max: hclk_cmt_x +=", "= create_get_tile_name(conn) get_pip_wire_names = create_get_pip_wire_names(conn) switch_name_map = {} print('{} Importing", "create_get_pip_wire_names(conn) switch_name_map = {} print('{} Importing edges from database.'.format(now())) with", "'--read_rr_graph', required=True, help='Input rr_graph file' ) parser.add_argument( '--write_rr_graph', required=True, help='Output", "Assign a connection box to an IPIN node. \"\"\" node_dict", "= 'SYN-GND' wire = 'GND' else: assert False, pin track_node", "schema files', ) print('{} Starting routing import'.format(now())) args = parser.parse_args()", "graph.get_switch_id(switch_name) switch_name_map[switch_pkey] = switch_id else: switch_id = switch_name_map[switch_pkey] return switch_id", "return get_pip_wire_names def get_number_graph_edges(conn, graph, node_mapping): num_edges = len(graph.edges) print('{}", "y1=synth_tiles['info']['GRID_Y_MIN'], x2=synth_tiles['info']['GRID_X_MAX'], y2=synth_tiles['info']['GRID_Y_MAX'], ) print('{} generating routing graph for ROI.'.format(now()))", "site_pin_pkey = ( SELECT pkey FROM site_pin WHERE site_type_pkey =", "cur.execute( \"SELECT wire_in_tile_pkey FROM wire WHERE pkey = ?\", (connection_box_wire_pkey,", "(src_net, dest_net) return get_pip_wire_names def get_number_graph_edges(conn, graph, node_mapping): num_edges =", "the hierarchy (its tile-wide, not site-wide). So here we #", "from database. 
cur = conn.cursor() cur.execute(\"SELECT count() FROM graph_edge;\" \"\")", "grid_y DESC LIMIT 1;\") y_max = cur.fetchone()[0] return x_max +", "output_file_name=args.write_rr_graph, ) graph = capnp_graph.graph if synth_tiles is None: synth_tiles", "for node in nodes: x = node.loc.x_low y = node.loc.y_low", "def set_connection_box( graph, node_idx, grid_x, grid_y, box_id, site_pin_delay ): \"\"\"", "LIMIT 1;\") x_max = cur.fetchone()[0] cur.execute(\"SELECT grid_y FROM phy_tile ORDER", "int(m.group(2)) % 50 == 0 # This is the value", "WHERE pkey = ? );\"\"\", (wire_pkey, ) ) (track_pkey, )", "They are used in differential input mode. # # Vivado", "placing them, therefore, # when the relative pip is traversed,", "tile_pkey, _ = get_tile_and_site_as_tile_pkey(gridloc[0], gridloc[1]) cur.execute( \"\"\" SELECT top_graph_node_pkey, bottom_graph_node_pkey,", "( SELECT node_pkey FROM wire WHERE pkey = ? );\"\"\",", "\"GND\": { 'loc': gnd_loc, 'pins': [ { 'wire': 'GND', 'pad':", "from hilbertcurve.hilbertcurve import HilbertCurve import math import prjxray.db from prjxray.roi", "x_max: hclk_cmt_x += 1 gridinfo = grid.gridinfo_at_loc((hclk_cmt_x, hclk_cmt_y)) if gridinfo.tile_type", "use_roi = False roi = None synth_tiles = None capnp_graph", "tile_pkey) ( top_graph_node_pkey, bottom_graph_node_pkey, left_graph_node_pkey, right_graph_node_pkey ) = result side", "in node_mapping: continue num_edges += 1 return num_edges def import_graph_edges(conn,", "FROM phy_tile WHERE pkey IN ( SELECT phy_tile_pkey FROM tile_map", "track_nodes = get_track_model(conn, track_pkey) option = list( tracks_model.get_tracks_for_wire_at_coord( tuple(synth_tile['loc']) ).values()", "now = datetime.datetime.now HCLK_CK_BUFHCLK_REGEX = re.compile('HCLK_CK_BUFHCLK[0-9]+') CLK_HROW_CK_MUX_REGEX = re.compile('CLK_HROW_CK_MUX_OUT_([LR])([0-9]+)') CASCOUT_REGEX", "differential output mode. 
# There is no feature assosciated with", "cur.execute( \"\"\" SELECT track_pkey FROM node WHERE pkey = (", "channels_obj): N = 2 p = math.ceil(math.log2(max(channels_obj.x_max, channels_obj.y_max))) point_map =", ").values() ) assert len(option) > 0, (pin, len(option)) if pin['port_type']", "y)] return get_tile_and_site_as_tile_pkey def create_get_site_as_tile_wire(cur): @functools.lru_cache(maxsize=0) def get_site_from_site_as_tile(site_as_tile_pkey): cur.execute( \"\"\"", "return x_max + 1, y_max + 1 def find_constant_network(graph): \"\"\"", "node_idx, connection_box_map ) elif side == tracks.Direction.BOTTOM: assert bottom_graph_node_pkey is", "results[0] @functools.lru_cache(maxsize=0) def get_site_as_tile_wire(site_as_tile_pkey, pin): site_type_pkey, site_pkey = get_site_from_site_as_tile( site_as_tile_pkey", "cur: x_list.append(graph2.ChannelList(idx, info)) cur.execute('SELECT idx, info FROM y_list;') y_list =", "c_out=0.0, c_internal=internal_capacitance, t_del=intrinsic_delay, p_cost=penalty_cost, ), sizing=graph2.SwitchSizing( mux_trans_size=0, buf_size=0, ), )", "False, (rebuf_tile, rebuf_wire_name) for node_pkey in REBUF_NODES: cur.execute( \"\"\" SELECT", "= results[0][0] tile_pkey, _ = get_tile_and_site_as_tile_pkey(gridloc[0], gridloc[1]) cur.execute( \"\"\" SELECT", "{} final {}'.format(num_channels, len(alive_tracks))) def add_synthetic_edges(conn, graph, node_mapping, grid, synth_tiles):", "HCLK_CK_BUFHCLK_REGEX.fullmatch(feature_path[-1]): enable_buffer_feature = '{}.ENABLE_BUFFER.{}'.format( feature_path[0], feature_path[-1] ) return ' '.join((feature,", "alive = 1;\"): alive_tracks.add(track_pkey) print('{} Importing alive tracks'.format(now())) import_tracks(conn, alive_tracks,", "accomodate this # need, as the IOI_SING tiles have the", "node_mapping[graph_node_pkey] = graph.add_track( track=track, segment_id=segment_id, ptc=ptc, timing=graph2.NodeTiming( r=resistance, c=capacitance, ),", "gnd_loc, 'pins': [ { 'wire': 'GND', 'pad': 'GND', 'port_type': 'GND',", "assosciated with that PIP in the prjxray db but there", "None, (tile_type, pin_name) node_mapping[top_graph_node_pkey] = node.id update_connection_box( conn, graph, top_graph_node_pkey,", "None: wire_in_tile_pkey = get_site_as_tile_wire(site_as_tile_pkey, pin) else: cur.execute( \"\"\" SELECT pkey", "tile, wire_name in cur: REBUF_SOURCES[(tile, wire_name)] = node_pkey HCLK_CMT_TILES =", "pkey = ?\", (connection_box_wire_pkey, ) ) wire_in_tile_pkey = cur.fetchone()[0] box_id", "while the BOTTOM IOI_SING # shares bits with the top", "which IOI_SING is selected (top or bottom) # # Example:", "'{} {}.{}'.format(feature, parts[0], wire_feature) return feature # CLBLL_L.CLBLL_LL_A1[0] -> (CLBLL_L,", "in enumerate(cur.execute(\"\"\" SELECT src_graph_node_pkey, dest_graph_node_pkey, switch_pkey, phy_tile_pkey, pip_in_tile_pkey, backward FROM", "BOTTOM of an IOI column m = IOI_SING_REGEX.fullmatch(feature) if m:", "y_loc) enable_bufhce_in_use = '{}.BUFHCE.{}.IN_USE'.format( feature_path[0], bufhce_loc ) enable_bufhce_zinv_ce = '{}.BUFHCE.{}.ZINV_CE=1\\'b1'.format(", "SELECT name FROM segment WHERE pkey = ( SELECT segment_pkey", "x_max, _, _ = grid.dims() for tile in grid.tiles(): gridinfo", "Returns a name of CLK_HROW tile only if its there", "# were not emitted in rrgraph XML. 
# # TODO:", "JOIN phy_tile ON phy_tile.pkey = wire.phy_tile_pkey WHERE wire.wire_in_tile_pkey IN (SELECT", "feature assosciated with that PIP in the prjxray db but", "tracks_model, track_nodes = get_track_model(conn, track_pkey) option = list( tracks_model.get_tracks_for_wire_at_coord( tuple(synth_tile['loc'])", "SELECT DISTINCT node_pkey FROM wire WHERE wire_in_tile_pkey IN (SELECT wire_in_tile_pkey", "dest_graph_node, switch_pkey, phy_tile_pkey, pip_pkey, backward) in enumerate(cur.execute(\"\"\" SELECT src_graph_node_pkey, dest_graph_node_pkey,", "wire_in_tile_pkey, tile_type_pkey, wire_name in cur: connection_box_map[wire_in_tile_pkey] = graph.maybe_add_connection_box( reduce_connection_box(wire_name) )", "lambda x: id_map[x] def main(): parser = argparse.ArgumentParser() parser.add_argument( '--db_root',", "site_as_tile INNER JOIN site ON site.pkey = site_as_tile.site_pkey WHERE site_as_tile.pkey", "= {} for node in nodes: x = node.loc.x_low y", "pin = m.group(2) tile_pkey, site_as_tile_pkey = get_tile_and_site_as_tile_pkey( node.loc.x_low, node.loc.y_low )", "' '.join((feature, enable_oclkm_feature)) if HCLK_CK_BUFHCLK_REGEX.fullmatch(feature_path[-1]): enable_buffer_feature = '{}.ENABLE_BUFFER.{}'.format( feature_path[0], feature_path[-1]", "node_mapping[bottom_graph_node_pkey] = node.id update_connection_box( conn, graph, bottom_graph_node_pkey, node_idx, connection_box_map )", "Importing edges from database.'.format(now())) with progressbar_utils.ProgressBar(max_value=num_edges) as bar: for idx,", "in point_map[coord]: id_map[old_id] = idx idx += 1 del point_map[coord]", "node.loc.y_low if (x, y) not in point_map: point_map[(x, y)] =", "y_dim = phy_grid_dims(conn) connection_box_obj = graph.create_connection_box_object( x_dim=x_dim, y_dim=y_dim ) num_edges", "the IOI_SING tiles have the same wire, and pip names", "conn.cursor() for (graph_node_pkey, track_pkey, graph_node_type, x_low, x_high, y_low, y_high, ptc,", "tiles = {} for tile_pkey, site_as_tile_pkey, grid_x, grid_y in cur.execute(\"\"\"", "name FROM wire_in_tile WHERE pkey = ?;\"\"\", (src_wire_in_tile_pkey, ) )", ") = cur.fetchone() print('{} Import alive tracks'.format(now())) alive_tracks = set()", "name FROM phy_tile WHERE pkey = ?; \"\"\", (tile_pkey, )", "print('{} Importing edges from database.'.format(now())) with progressbar_utils.ProgressBar(max_value=num_edges) as bar: for", "= ( SELECT pkey FROM site_pin WHERE site_type_pkey = ?", "block_type.id assert 'SYN-GND' in block_types assert 'SYN-VCC' in block_types gnd_block_id", "= HCLK_OUT.fullmatch(feature_path[-1]) if m: return ' '.join( [feature] + find_hclk_cmt_hclk_feature(", "the top half of a normal IOI TILE. 
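# Illustrative aside, not part of the original import flow: a minimal check of
# how IOI_SING_REGEX splits a fasm feature name from a [LR]IOI_SING tile.  The
# feature string below is made up; any name matching the pattern behaves the
# same way.
def _ioi_sing_regex_example():
    m = IOI_SING_REGEX.fullmatch(
        'LIOI3_SING_X0Y50.IOI_OLOGIC0_D1.IOI_IMUX34_0'
    )
    assert m is not None
    # group(1) tile prefix, group(2) Y coordinate within the IOI/IOB column,
    # group(3) literal '.IOI_', group(4) one of IOI_SITE_PIPS, group(5) the
    # site index that may need flipping, group(6) the rest of the feature.
    assert m.group(4) == 'OLOGIC'
    assert m.group(5) == '0'
    return m.groups()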
REBUF_NODES = {}
REBUF_SOURCES = {}


def get_clk_hrow_and_rebuf_tiles_sorted(cur):
    """
    Finds all CLK_HROW_TOP_R, CLK_HROW_BOT_R and REBUF tiles.
    Returns them in a list sorted according to their Y coordinates.
    """
    cur.execute(
        """
SELECT name FROM phy_tile
WHERE name LIKE "CLK_HROW_BOT_R_%"
   OR name LIKE "CLK_HROW_TOP_R_%"
   OR name LIKE "CLK_BUFG_REBUF_%"
ORDER BY grid_y DESC;
"""
    )
    return [t[0] for t in cur.fetchall()]


def populate_bufg_rebuf_map(conn):
    global REBUF_NODES
    REBUF_NODES = {}

    global REBUF_SOURCES
    REBUF_SOURCES = {}

    rebuf_wire_regexp = re.compile('CLK_BUFG_REBUF_R_CK_GCLK([0-9]+)_(BOT|TOP)')

    cur = conn.cursor()

    # Find CLK_HROW_TOP_R, CLK_HROW_BOT_R and REBUF tiles.
    rebuf_and_hrow_tiles = get_clk_hrow_and_rebuf_tiles_sorted(cur)
    # Append None on both ends of the list to simplify the code below.
    rebuf_and_hrow_tiles = [None] + rebuf_and_hrow_tiles + [None]

    def maybe_get_clk_hrow(i):
        """ Returns a name of CLK_HROW tile only if it's there on the list. """
        tile = rebuf_and_hrow_tiles[i]
        if tile is not None and tile.startswith("CLK_HROW"):
            return tile
        return None

    # Assign each REBUF tile its above and below CLK_HROW tile. Note that in
    # VPR coords terms "above" and "below" mean the opposite...
    rebuf_to_hrow_map = {}
    for i, tile_name in enumerate(rebuf_and_hrow_tiles):
        if tile_name is not None and tile_name.startswith("CLK_BUFG_REBUF"):
            rebuf_to_hrow_map[tile_name] = {
                "above": maybe_get_clk_hrow(i - 1),
                "below": maybe_get_clk_hrow(i + 1),
            }

    # Find nodes touching rebuf wires.
    cur.execute(
        """
WITH rebuf_wires(wire_in_tile_pkey) AS (
  SELECT pkey FROM wire_in_tile
  WHERE name LIKE "CLK_BUFG_REBUF_R_CK_GCLK%_BOT"
     OR name LIKE "CLK_BUFG_REBUF_R_CK_GCLK%_TOP"
), rebuf_nodes(node_pkey) AS (
  SELECT DISTINCT node_pkey FROM wire
  WHERE wire_in_tile_pkey IN (SELECT wire_in_tile_pkey FROM rebuf_wires)
)
SELECT rebuf_nodes.node_pkey, phy_tile.name, wire_in_tile.name
FROM rebuf_nodes
INNER JOIN wire ON wire.node_pkey = rebuf_nodes.node_pkey
INNER JOIN wire_in_tile ON wire_in_tile.pkey = wire.wire_in_tile_pkey
INNER JOIN phy_tile ON phy_tile.pkey = wire.phy_tile_pkey
WHERE wire.wire_in_tile_pkey IN (SELECT wire_in_tile_pkey FROM rebuf_wires)
ORDER BY rebuf_nodes.node_pkey;
"""
    )
    for node_pkey, rebuf_tile, rebuf_wire_name in cur:
        if node_pkey not in REBUF_NODES:
            REBUF_NODES[node_pkey] = []

        m = rebuf_wire_regexp.fullmatch(rebuf_wire_name)

        if m.group(2) == 'TOP':
            REBUF_NODES[node_pkey].append(
                '{}.GCLK{}_ENABLE_BELOW'.format(rebuf_tile, m.group(1))
            )
            hrow_tile = rebuf_to_hrow_map[rebuf_tile]["below"]
        elif m.group(2) == 'BOT':
            REBUF_NODES[node_pkey].append(
                '{}.GCLK{}_ENABLE_ABOVE'.format(rebuf_tile, m.group(1))
            )
            hrow_tile = rebuf_to_hrow_map[rebuf_tile]["above"]
        else:
            assert False, (rebuf_tile, rebuf_wire_name)

        if hrow_tile is not None:
            REBUF_NODES[node_pkey].append(
                "{}.CLK_HROW_R_CK_GCLK{}_ACTIVE".format(hrow_tile, m.group(1))
            )

    for node_pkey in REBUF_NODES:
        cur.execute(
            """
SELECT phy_tile.name, wire_in_tile.name
FROM wire
INNER JOIN phy_tile ON phy_tile.pkey = wire.phy_tile_pkey
INNER JOIN wire_in_tile ON wire_in_tile.pkey = wire.wire_in_tile_pkey
WHERE wire.node_pkey = ?;""", (node_pkey, )
        )

        for tile, wire_name in cur:
            REBUF_SOURCES[(tile, wire_name)] = node_pkey


HCLK_CMT_TILES = {}


def populate_hclk_cmt_tiles(db):
    global HCLK_CMT_TILES
    HCLK_CMT_TILES = {}

    grid = db.grid()
    _, x_max, _, _ = grid.dims()

    for tile in grid.tiles():
        gridinfo = grid.gridinfo_at_tilename(tile)
        if gridinfo.tile_type not in ['CLK_HROW_BOT_R', 'CLK_HROW_TOP_R']:
            continue

        hclk_x, hclk_y = grid.loc_of_tilename(tile)

        # Walk left from the CLK_HROW tile until an HCLK_CMT tile is found.
        hclk_cmt_x = hclk_x
        hclk_cmt_y = hclk_y
        while hclk_cmt_x > 0:
            hclk_cmt_x -= 1
            gridinfo = grid.gridinfo_at_loc((hclk_cmt_x, hclk_cmt_y))
            if gridinfo.tile_type == 'HCLK_CMT':
                HCLK_CMT_TILES[tile, 'L'] = grid.tilename_at_loc(
                    (hclk_cmt_x, hclk_cmt_y)
                )
                break

        # Walk right until an HCLK_CMT_L tile is found.
        hclk_cmt_x = hclk_x
        while hclk_cmt_x < x_max:
            hclk_cmt_x += 1
            gridinfo = grid.gridinfo_at_loc((hclk_cmt_x, hclk_cmt_y))
            if gridinfo.tile_type == 'HCLK_CMT_L':
                HCLK_CMT_TILES[tile, 'R'] = grid.tilename_at_loc(
                    (hclk_cmt_x, hclk_cmt_y)
                )
                break


def find_hclk_cmt_hclk_feature(hclk_tile, lr, hclk_number):
    if (hclk_tile, lr) not in HCLK_CMT_TILES:
        return []

    hclk_cmt_tile = HCLK_CMT_TILES[(hclk_tile, lr)]
    return ['{}.HCLK_CMT_CK_BUFHCLK{}_USED'.format(hclk_cmt_tile, hclk_number)]
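# Illustrative aside, not part of the original script: once populated, the two
# REBUF maps above are consumed as a simple two-step lookup, (tile, wire) ->
# node -> extra fasm features.  The arguments would be real tile and wire
# names; nothing here is device-specific.
def _rebuf_lookup_example(tile, wire):
    node_pkey = REBUF_SOURCES.get((tile, wire))
    if node_pkey is None:
        return []
    return REBUF_NODES[node_pkey]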
)\"\"\", (track_pkey, )", "import progressbar_utils import datetime import re import functools import pickle", "segment_id ) # Set of (src, sink, switch_id) tuples that", "import re import functools import pickle import sqlite3 now =", "synth_tiles = None if args.synth_tiles: use_roi = True with open(args.synth_tiles)", "lib.rr_graph import graph2 from lib.rr_graph import tracks from lib.connection_database import", "WHERE name LIKE \"CLK_HROW_BOT_R_%\" OR name LIKE \"CLK_HROW_TOP_R_%\" OR name", "id=box_id, site_pin_delay=site_pin_delay, ) graph.nodes[node_idx] = graph2.Node(**node_dict) def update_connection_box( conn, graph,", ") cur.execute( \"\"\" SELECT pkey FROM wire_in_tile WHERE site_pin_pkey =", "continue hclk_x, hclk_y = grid.loc_of_tilename(tile) hclk_cmt_x = hclk_x hclk_cmt_y =", "hclk_cmt_x += 1 gridinfo = grid.gridinfo_at_loc((hclk_cmt_x, hclk_cmt_y)) if gridinfo.tile_type ==", "= \"{}{}\".format(dst_value, src_wire) feature = \"{}{}\".format(unchanged_feature, changed_feature) feature_path = feature.split('.')", "if BUFG_CLK_IN_REGEX.fullmatch(feature_path[-1]): enable_feature = '{}.{}_ACTIVE'.format( feature_path[0], feature_path[-1] ) return '", "): \"\"\" Assign a connection box to an IPIN node.", "# # TODO: This can be removed once # https://github.com/verilog-to-routing/vtr-verilog-to-routing/issues/354", "rebuf_nodes(node_pkey) AS ( SELECT DISTINCT node_pkey FROM wire WHERE wire_in_tile_pkey", "below. if pin_name.startswith('SYN-'): set_connection_box( graph, node_idx, node.loc.x_low, node.loc.y_low, box_id=graph.maybe_add_connection_box('IMUX'), site_pin_delay=0.,", "idx % 1024 == 0: bar.update(idx) def phy_grid_dims(conn): \"\"\" Returns", "tile_name = get_tile_name(phy_tile_pkey) src_net, dest_net = get_pip_wire_names(pip_pkey) if not backward:", "= cur.fetchone()[0] return x_max + 1, y_max + 1 def", "Then yield edges from database. cur = conn.cursor() cur.execute(\"SELECT count()", "to disk.'.format(now())) capnp_graph.serialize_to_capnp( channels_obj=channels_obj, connection_box_obj=connection_box_obj, num_nodes=len(capnp_graph.graph.nodes), nodes_obj=yield_nodes(capnp_graph.graph.nodes), num_edges=num_edges, edges_obj=import_graph_edges(conn, graph,", "if vcc_block_id == grid_loc.block_type_id: assert vcc_loc is None vcc_loc =", "\"Impossible to determine X location of BUFHCE\" y_loc = m.group(2)", "help='Input rr_graph file' ) parser.add_argument( '--write_rr_graph', required=True, help='Output rr_graph file'", "graph_edge; \"\"\")): if src_graph_node not in node_mapping: continue if dest_graph_node", "def get_switch_name(conn, graph, switch_name_map, switch_pkey): assert switch_pkey is not None", "connection_box_map def yield_nodes(nodes): with progressbar_utils.ProgressBar(max_value=len(nodes)) as bar: for idx, node", "m: x_loc_str = m.group(1) if 'L' in x_loc_str: x_loc =", "in the rr_graph. Add rr_edge for each row in the", "elif pin['port_type'] in ['VCC', 'GND', 'output']: graph.add_edge( src_node=pin_node[0][0], sink_node=node_mapping[track_node], switch_id=delayless_switch,", "used in differential input mode. 
# # Vivado does not", "graph.get_segment_id_from_name('dummy') create_track_rr_graph( conn, graph, node_mapping, use_roi, roi, synth_tiles, segment_id )", ") SELECT rebuf_nodes.node_pkey, phy_tile.name, wire_in_tile.name FROM rebuf_nodes INNER JOIN wire", "FROM wire WHERE pkey = ?\", (connection_box_wire_pkey, ) ) wire_in_tile_pkey", "grid_x FROM phy_tile ORDER BY grid_x DESC LIMIT 1;\") x_max", "if m: enable_cascout = '{}.CASCOUT_{}_ACTIVE'.format( feature_path[0], m.group(1) ) return '", "create_channels(conn) node_remap = create_node_remap(capnp_graph.graph.nodes, channels_obj) x_dim, y_dim = phy_grid_dims(conn) connection_box_obj", "enable_feature = '{}.{}_ACTIVE'.format( feature_path[0], feature_path[-1] ) return ' '.join((feature, enable_feature))", "to determine X location of BUFHCE\" y_loc = m.group(2) bufhce_loc", "cur.fetchone()[0] box_id = connection_box_map[wire_in_tile_pkey] cur.execute( \"\"\" SELECT switch.intrinsic_delay FROM switch", "FROM phy_tile WHERE name LIKE \"CLK_HROW_BOT_R_%\" OR name LIKE \"CLK_HROW_TOP_R_%\"", "= get_tile_and_site_as_tile_pkey(gridloc[0], gridloc[1]) cur.execute( \"\"\" SELECT top_graph_node_pkey, bottom_graph_node_pkey, left_graph_node_pkey, right_graph_node_pkey", "grid_x, grid_y FROM phy_tile WHERE pkey = ( SELECT phy_tile_pkey", "specified. Rough structure: Add rr_nodes for CHANX and CHANY from", "if HCLK_CK_BUFHCLK_REGEX.fullmatch(feature_path[-1]): enable_buffer_feature = '{}.ENABLE_BUFFER.{}'.format( feature_path[0], feature_path[-1] ) return '", "FROM graph_edge;\" \"\") (num_edges, ) = cur.fetchone() get_tile_name = create_get_tile_name(conn)", "continue num_edges += 1 return num_edges def import_graph_edges(conn, graph, node_mapping):", "in ['CLK_HROW_BOT_R', 'CLK_HROW_TOP_R']: continue hclk_x, hclk_y = grid.loc_of_tilename(tile) hclk_cmt_x =", "['IMUX', 'LOGIC_OUTS', 'CTRL', 'FAN', 'BYP']: if pip in src_wire: src_wire", "get_switch_name(conn, graph, switch_name_map, switch_pkey): assert switch_pkey is not None if", "None, (tile_type, pin_name) node_mapping[left_graph_node_pkey] = node.id update_connection_box( conn, graph, left_graph_node_pkey,", "\"\"\"SELECT name FROM switch WHERE pkey = ?;\"\"\", (switch_pkey, )", "tracks_model.get_tracks_for_wire_at_coord( tuple(synth_tile['loc']) ).values() ) assert len(option) > 0, (pin, len(option))", "def update_connection_box( conn, graph, graph_node_pkey, node_idx, connection_box_map ): \"\"\" Update", "x_min, x_max, y_min, y_max FROM channel;\"\"\" ) chan_width_max, x_min, x_max,", "ROI, synthetic tile defintion from prjxray-arch-import' ) parser.add_argument( '--graph_limit', help='Limit", "# is fixed. 
try: graph.get_switch_id(name) continue except KeyError: capnp_graph.add_switch( graph2.Switch(", "hclk_x, hclk_y = grid.loc_of_tilename(tile) hclk_cmt_x = hclk_x hclk_cmt_y = hclk_y", "is not None: REBUF_NODES[node_pkey].append( \"{}.CLK_HROW_R_CK_GCLK{}_ACTIVE\".format( hrow_tile, m.group(1) ) ) else:", "tuple(synth_tile['loc']), pin_name ) if pin['port_type'] == 'input': graph.add_edge( src_node=node_mapping[track_node], sink_node=pin_node[0][0],", "(grid_loc.x, grid_loc.y) if vcc_block_id == grid_loc.block_type_id: assert vcc_loc is None", "def get_pip_wire_names(pip_pkey): cur.execute( \"\"\"SELECT src_wire_in_tile_pkey, dest_wire_in_tile_pkey FROM pip_in_tile WHERE pkey", "Examples: >>> reduce_connection_box('IMUX0') 'IMUX' >>> reduce_connection_box('IMUX1') 'IMUX' >>> reduce_connection_box('IMUX10') 'IMUX'", ">>> reduce_connection_box('IMUX0') 'IMUX' >>> reduce_connection_box('IMUX1') 'IMUX' >>> reduce_connection_box('IMUX10') 'IMUX' >>>", "= node.id update_connection_box( conn, graph, top_graph_node_pkey, node_idx, connection_box_map ) elif", "backward FROM graph_edge; \"\"\")): if src_graph_node not in node_mapping: continue", "JOIN wire_in_tile ON wire_in_tile.pkey = wire.wire_in_tile_pkey WHERE wire.node_pkey = ?;\"\"\",", "?\"\"\", (graph_node_pkey, ) ) connection_box_wire_pkey = cur.fetchone()[0] if connection_box_wire_pkey is", "not None synth_tiles = { 'tiles': { \"VCC\": { 'loc':", "= len( list( filter( lambda t: t['port_type'] == 'input', synth_tile['pins']", "Finds all CLK_HROW_TOP_R, CLK_HROW_BOT_T and REBUF tiles. returns them in", "not None, (tile_type, pin_name) node_mapping[bottom_graph_node_pkey] = node.id update_connection_box( conn, graph,", "()) if idx % 1024 == 0: bar.update(idx) def create_channels(conn):", "site_as_tile_pkey is not None: wire_in_tile_pkey = get_site_as_tile_wire(site_as_tile_pkey, pin) else: cur.execute(", "in common with the IOI tiles. 
# # The difference", "REBUF_NODES[node_pkey] = [] m = rebuf_wire_regexp.fullmatch(rebuf_wire_name) if m.group(2) == 'TOP':", "# Each clock region spans a total of 50 IOBs.", "required=True, help='Project X-Ray Database' ) parser.add_argument('--part', required=True, help='FPGA part') parser.add_argument(", "The \"DIFF_OUT\" cannot be set in the architecture as it", "if m: # Each clock region spans a total of", "pkey = ?;\"\"\", (pip_pkey, ) ) src_wire_in_tile_pkey, dest_wire_in_tile_pkey = cur.fetchone()", "src_graph_node_pkey, dest_graph_node_pkey, switch_pkey, phy_tile_pkey, pip_in_tile_pkey, backward FROM graph_edge; \"\"\")): if", "synth_tile['pins'] ) ) ) for pin in synth_tile['pins']: if pin['port_type']", "SELECT name, internal_capacitance, drive_resistance, intrinsic_delay, penalty_cost, switch_type FROM switch;\"\"\"): #", "chan_width_max, x_min, x_max, y_min, y_max = cur.fetchone() cur.execute('SELECT idx, info", "m = CLK_HROW_CK_MUX_REGEX.fullmatch(feature_path[-1]) if m: x_loc_str = m.group(1) if 'L'", "pin['port_type'] in ['input', 'output']: wire_pkey = get_wire_pkey(conn, tile_name, pin['wire']) cur.execute(", "track;\"\"\") (num_channels, ) = cur.fetchone() print('{} Import alive tracks'.format(now())) alive_tracks", "get_tile_name(tile_pkey): cur.execute( \"\"\" SELECT name FROM phy_tile WHERE pkey =", "from lib.rr_graph import tracks from lib.connection_database import get_wire_pkey, get_track_model import", "print('{} Writing node map.'.format(now())) with open(args.write_rr_node_map, 'wb') as f: pickle.dump(node_mapping,", "relative pip is traversed, the correct fasm feature needs to", "= graph2.NodeType(graph_node_type) if node_type == graph2.NodeType.CHANX: direction = 'X' x_low", "hclk_cmt_x = hclk_x hclk_cmt_y = hclk_y while hclk_cmt_x > 0:", "x_dim=x_dim, y_dim=y_dim ) num_edges = get_number_graph_edges(conn, graph, node_mapping) print('{} Serializing", "disable the inverter on CE input which is connected to", "- IN_USE: to enable the BUFHCE site # - ZINV_CE:", "type=graph2.SwitchType[switch_type.upper()], timing=graph2.SwitchTiming( r=drive_resistance, c_in=0.0, c_out=0.0, c_internal=internal_capacitance, t_del=intrinsic_delay, p_cost=penalty_cost, ), sizing=graph2.SwitchSizing(", "in node_mapping: node_mapping[k] = node_remap(node_mapping[k]) print('{} Writing node map.'.format(now())) with", "if rebuf_key in REBUF_SOURCES: return ' '.join([feature] + REBUF_NODES[REBUF_SOURCES[rebuf_key]]) m", "enabled. Some pips imply other features. Example: .HCLK_LEAF_CLK_B_BOTL0.HCLK_CK_BUFHCLK10 implies: .ENABLE_BUFFER.HCLK_CK_BUFHCLK10", "None synth_tiles = None capnp_graph = capnp_graph2.Graph( rr_graph_schema_fname=os.path.join( args.vpr_capnp_schema_dir, 'rr_graph_uxsdcxx.capnp'", "node_mapping: continue num_edges += 1 return num_edges def import_graph_edges(conn, graph,", "pseudo-pip we simply reject fasm # features here. 
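# Illustrative aside, not part of the original script: the docstring example
# above, spelled out as a call.  With the REBUF and HCLK_CMT maps left empty
# this exercises only the HCLK_CK_BUFHCLK branch of check_feature.
def _check_feature_example():
    expanded = check_feature('.HCLK_LEAF_CLK_B_BOTL0.HCLK_CK_BUFHCLK10')
    assert expanded == (
        '.HCLK_LEAF_CLK_B_BOTL0.HCLK_CK_BUFHCLK10'
        ' .ENABLE_BUFFER.HCLK_CK_BUFHCLK10'
    )
    return expanded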
# CLBLL_L.CLBLL_LL_A1[0] -> (CLBLL_L, CLBLL_LL_A1)
PIN_NAME_TO_PARTS = re.compile(r'^([^\.]+)\.([^\]]+)\[0\]$')


def reduce_connection_box(box):
    """ Reduce the number of connection boxes by merging some.

    Examples:

    >>> reduce_connection_box('IMUX0')
    'IMUX'
    >>> reduce_connection_box('IMUX1')
    'IMUX'
    >>> reduce_connection_box('IMUX10')
    'IMUX'
    >>> reduce_connection_box('BRAM_ADDR')
    'IMUX'
    >>> reduce_connection_box('B')
    'B'
    >>> reduce_connection_box('B_L')
    'B'

    """
    box = CONNECTION_BOX_FILTER.match(box).group(1)

    if 'BRAM_ADDR' in box:
        box = 'IMUX'

    if box.endswith('_L'):
        box = box.replace('_L', '')

    return box


def set_connection_box(graph, node_idx, grid_x, grid_y, box_id, site_pin_delay):
    """ Assign a connection box to an IPIN node. """
    node_dict = graph.nodes[node_idx]._asdict()
    node_dict['connection_box'] = graph2.ConnectionBox(
        x=grid_x,
        y=grid_y,
        id=box_id,
        site_pin_delay=site_pin_delay,
    )
    graph.nodes[node_idx] = graph2.Node(**node_dict)


def update_connection_box(conn, graph, graph_node_pkey, node_idx, connection_box_map):
    """ Update connection box of IPIN node if needed.

    Reads the graph_node's connection_box_wire_pkey; when present, fetches the
    physical tile grid_x/grid_y and the wire_in_tile_pkey of that wire,
    resolves the connection box id through connection_box_map, looks up the
    site pin's intrinsic_delay from the switch table, and applies the result
    with set_connection_box.
    """


def create_get_tile_and_site_as_tile_pkey(cur):
    tiles = {}
    for tile_pkey, site_as_tile_pkey, grid_x, grid_y in cur.execute("""
SELECT pkey, site_as_tile_pkey, grid_x, grid_y FROM tile;"""):
        tiles[(grid_x, grid_y)] = (tile_pkey, site_as_tile_pkey)

    def get_tile_and_site_as_tile_pkey(x, y):
        return tiles[(x, y)]

    return get_tile_and_site_as_tile_pkey


def create_get_site_as_tile_wire(cur):
    @functools.lru_cache(maxsize=0)
    def get_site_from_site_as_tile(site_as_tile_pkey):
        cur.execute(
            """
SELECT site.site_type_pkey, site_as_tile.site_pkey
FROM site_as_tile
INNER JOIN site ON site.pkey = site_as_tile.site_pkey
WHERE site_as_tile.pkey = ?""", (site_as_tile_pkey, )
        )
        results = cur.fetchall()
        assert len(results) == 1, site_as_tile_pkey
        return results[0]

    @functools.lru_cache(maxsize=0)
    def get_site_as_tile_wire(site_as_tile_pkey, pin):
        site_type_pkey, site_pkey = get_site_from_site_as_tile(
            site_as_tile_pkey
        )
        cur.execute(
            """
SELECT pkey FROM wire_in_tile
WHERE
  site_pin_pkey = (
    SELECT pkey FROM site_pin WHERE site_type_pkey = ? AND name = ?
  )
AND
  site_pkey = ?;""", (site_type_pkey, pin, site_pkey)
        )
        results = cur.fetchall()
        assert len(results) == 1
        wire_in_tile_pkey = results[0][0]

        return wire_in_tile_pkey

    return get_site_as_tile_wire


def create_connection_boxes(conn, graph):
    """ Assign connection box ids for all connection box types.

    Walks the wire_in_tile rows that act as IPIN connection boxes and maps
    each wire_in_tile_pkey to the id returned by
    graph.maybe_add_connection_box(reduce_connection_box(wire_name)).
    """
def import_graph_nodes(conn, graph, node_mapping, connection_box_map):
    """ Match site pin rr nodes with graph_node's in the connection_database.

    Walks the IPIN and OPIN nodes of the input rr graph, resolves each pin
    name (via PIN_NAME_TO_PARTS, or directly for synthetic 'SYN-' blocks) to a
    wire_in_tile_pkey, queries the top/bottom/left/right graph_node_pkeys of
    the matching wire, and records node_mapping[graph_node_pkey] = node.id for
    the side the pin actually faces, updating IPIN connection boxes through
    update_connection_box along the way.
    """


def import_tracks(conn, alive_tracks, node_mapping, graph, default_segment_id):
    """ Add CHANX/CHANY rr nodes for all alive tracks.

    For each graph_node row with a track_pkey, skips dead tracks, resolves the
    segment id (falling back to default_segment_id), clamps x_low/y_low to at
    least 1, looks up the canonical physical tile location, and stores the id
    returned by graph.add_track(...) in node_mapping[graph_node_pkey] together
    with NodeTiming(r=resistance, c=capacitance).
    """


def create_track_rr_graph(conn, graph, node_mapping, use_roi, roi, synth_tiles, segment_id):
    cur = conn.cursor()
    cur.execute("""SELECT count(*) FROM track;""")
    (num_channels, ) = cur.fetchone()

    print('{} Import alive tracks'.format(now()))
    alive_tracks = set()
    for (track_pkey, ) in cur.execute("SELECT pkey FROM track WHERE alive = 1;"):
        alive_tracks.add(track_pkey)

    print('{} Importing alive tracks'.format(now()))
    import_tracks(conn, alive_tracks, node_mapping, graph, segment_id)

    print('original {} final {}'.format(num_channels, len(alive_tracks)))


def add_synthetic_edges(conn, graph, node_mapping, grid, synth_tiles):
    """ Add edges between the synthetic tiles and the routing fabric.

    For every pin of every synthetic tile, finds the track serving the pin's
    location (get_wire_pkey/get_track_model for real IO pins, the
    constant_sources table for VCC and GND pins), then adds a delayless edge
    between the pin's IPIN/OPIN node and the mapped track node.  Multi-pin
    synthetic tiles derive their pin names with
    graph.create_pin_name_from_tile_type_sub_tile_num_and_pin.
    """


def get_switch_name(conn, graph, switch_name_map, switch_pkey):
    assert switch_pkey is not None
    if switch_pkey not in switch_name_map:
        cur = conn.cursor()
        cur.execute(
            """SELECT name FROM switch WHERE pkey = ?;""", (switch_pkey, )
        )
        (switch_name, ) = cur.fetchone()
        switch_id = graph.get_switch_id(switch_name)
        switch_name_map[switch_pkey] = switch_id
    else:
        switch_id = switch_name_map[switch_pkey]

    return switch_id


def create_get_tile_name(conn):
    cur = conn.cursor()

    @functools.lru_cache(maxsize=None)
    def get_tile_name(tile_pkey):
        cur.execute(
            """SELECT name FROM phy_tile WHERE pkey = ?;""", (tile_pkey, )
        )
        return cur.fetchone()[0]

    return get_tile_name


def create_get_pip_wire_names(conn):
    cur = conn.cursor()

    @functools.lru_cache(maxsize=None)
    def get_pip_wire_names(pip_pkey):
        cur.execute(
            """SELECT src_wire_in_tile_pkey, dest_wire_in_tile_pkey
               FROM pip_in_tile WHERE pkey = ?;""", (pip_pkey, )
        )
        src_wire_in_tile_pkey, dest_wire_in_tile_pkey = cur.fetchone()

        cur.execute(
            """SELECT name FROM wire_in_tile WHERE pkey = ?;""",
            (src_wire_in_tile_pkey, )
        )
        (src_net, ) = cur.fetchone()

        cur.execute(
            """SELECT name FROM wire_in_tile WHERE pkey = ?;""",
            (dest_wire_in_tile_pkey, )
        )
        (dest_net, ) = cur.fetchone()

        return (src_net, dest_net)

    return get_pip_wire_names


def get_number_graph_edges(conn, graph, node_mapping):
    """ Count the edges that will be emitted, existing rr edges included. """
    num_edges = len(graph.edges)

    print('{} Counting edges.'.format(now()))
    cur = conn.cursor()
    for src_graph_node, dest_graph_node in cur.execute("""
SELECT src_graph_node_pkey, dest_graph_node_pkey FROM graph_edge;"""):
        if src_graph_node not in node_mapping:
            continue
        if dest_graph_node not in node_mapping:
            continue
        num_edges += 1

    return num_edges


def import_graph_edges(conn, graph, node_mapping):
    """ Yield all routing edges for serialization.

    First yields the edges already present in the input rr graph, then streams
    the graph_edge table, skipping edges whose endpoints are not in
    node_mapping.  For edges backed by a pip, the pip name is rebuilt from the
    tile and wire names (respecting the backward flag), run through
    check_feature, and the result is attached to the edge as fasm_features
    metadata; other edges are yielded with no metadata.
    """


def create_channels(conn):
    cur = conn.cursor()
    cur.execute("""SELECT chan_width_max, x_min, x_max, y_min, y_max FROM channel;""")
    chan_width_max, x_min, x_max, y_min, y_max = cur.fetchone()

    cur.execute('SELECT idx, info FROM x_list;')
    x_list = [graph2.ChannelList(idx, info) for idx, info in cur]

    cur.execute('SELECT idx, info FROM y_list;')
    y_list = [graph2.ChannelList(idx, info) for idx, info in cur]

    return graph2.Channels(
        chan_width_max=chan_width_max,
        x_min=x_min,
        y_min=y_min,
        x_max=x_max,
        y_max=y_max,
        x_list=x_list,
        y_list=y_list,
    )


def yield_nodes(nodes):
    with progressbar_utils.ProgressBar(max_value=len(nodes)) as bar:
        for idx, node in enumerate(nodes):
            yield node
            if idx % 1024 == 0:
                bar.update(idx)


def phy_grid_dims(conn):
    """ Returns physical grid dimensions. """
    cur = conn.cursor()
    cur.execute("SELECT grid_x FROM phy_tile ORDER BY grid_x DESC LIMIT 1;")
    x_max = cur.fetchone()[0]
    cur.execute("SELECT grid_y FROM phy_tile ORDER BY grid_y DESC LIMIT 1;")
    y_max = cur.fetchone()[0]

    return x_max + 1, y_max + 1


def find_constant_network(graph):
    """ Find VCC and GND tiles and create synth_tiles input.

    All arches should have these synthetic tiles; search the input rr graph
    for the SYN-GND and SYN-VCC block types and record their grid locations.
    """
    block_types = {}
    for block_type in graph.block_types:
        block_types[block_type.name] = block_type.id

    assert 'SYN-GND' in block_types
    assert 'SYN-VCC' in block_types

    gnd_block_id = block_types['SYN-GND']
    vcc_block_id = block_types['SYN-VCC']

    gnd_loc = None
    vcc_loc = None

    for grid_loc in graph.grid:
        if gnd_block_id == grid_loc.block_type_id:
            assert gnd_loc is None
            gnd_loc = (grid_loc.x, grid_loc.y)
        if vcc_block_id == grid_loc.block_type_id:
            assert vcc_loc is None
            vcc_loc = (grid_loc.x, grid_loc.y)

    assert gnd_loc is not None
    assert vcc_loc is not None

    synth_tiles = {
        'tiles': {
            "VCC": {
                'loc': vcc_loc,
                'pins': [{'wire': 'VCC', 'pad': 'VCC', 'port_type': 'VCC', 'is_clock': False}],
            },
            "GND": {
                'loc': gnd_loc,
                'pins': [{'wire': 'GND', 'pad': 'GND', 'port_type': 'GND', 'is_clock': False}],
            },
        }
    }

    return synth_tiles
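# Illustrative aside, not part of the original script: the edge import above
# boils down to translating graph_node pkeys into rr node ids through
# node_mapping and dropping edges whose endpoints were never imported.  A
# dict-only sketch of that filter-and-translate step (pkeys are made up):
def _edge_translation_example():
    node_mapping = {101: 7, 102: 8}            # graph_node pkey -> rr node id
    db_edges = [(101, 102, 0), (101, 999, 0)]  # (src, dest, switch_pkey)

    rr_edges = [
        (node_mapping[src], node_mapping[dest], switch)
        for src, dest, switch in db_edges
        if src in node_mapping and dest in node_mapping
    ]

    assert rr_edges == [(7, 8, 0)]
    return rr_edges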
\"\"\"", "grid_y in cur.execute(\"\"\" SELECT pkey, site_as_tile_pkey, grid_x, grid_y FROM tile;\"\"\"):", "cur: if node_pkey not in REBUF_NODES: REBUF_NODES[node_pkey] = [] m", "cur.fetchone()[0] if connection_box_wire_pkey is not None: cur.execute( \"\"\" SELECT grid_x,", "drive_resistance, intrinsic_delay, penalty_cost, switch_type FROM switch;\"\"\"): # Add back missing", "return '' # REBUF stuff rebuf_key = (feature_path[0], feature_path[1]) if", "= get_tile_name(phy_tile_pkey) src_net, dest_net = get_pip_wire_names(pip_pkey) if not backward: pip_name", "== 'BOT': REBUF_NODES[node_pkey].append( '{}.GCLK{}_ENABLE_ABOVE'.format(rebuf_tile, m.group(1)) ) hrow_tile = rebuf_to_hrow_map[rebuf_tile][\"above\"] if", "'VCC', 'is_clock': False, }, ], }, \"GND\": { 'loc': gnd_loc,", "is the value to attach to the source pip name", "idx % 1024 == 0: bar.update(idx) def create_channels(conn): cur =", ") ) # Mapping of graph_node.pkey to rr node id.", "node WHERE pkey = ( SELECT node_pkey FROM graph_node WHERE", "# when the relative pip is traversed, the correct fasm", "assert len(results) == 1 wire_in_tile_pkey = results[0][0] return wire_in_tile_pkey return", "try: graph.get_switch_id(name) continue except KeyError: capnp_graph.add_switch( graph2.Switch( id=None, name=name, type=graph2.SwitchType[switch_type.upper()],", "x_dim, y_dim = phy_grid_dims(conn) connection_box_obj = graph.create_connection_box_object( x_dim=x_dim, y_dim=y_dim )", "reduce_connection_box(wire_name) ) return connection_box_map def yield_nodes(nodes): with progressbar_utils.ProgressBar(max_value=len(nodes)) as bar:", "\"\"\"SELECT name FROM wire_in_tile WHERE pkey = ?;\"\"\", (dest_wire_in_tile_pkey, )", "None: REBUF_NODES[node_pkey].append( \"{}.CLK_HROW_R_CK_GCLK{}_ACTIVE\".format( hrow_tile, m.group(1) ) ) elif m.group(2) ==", "'--connection_database', help='Database of fabric connectivity', required=True ) parser.add_argument( '--synth_tiles', help='If", "present from the input rr_graph. Create a mapping between database", "x_max = cur.fetchone()[0] cur.execute(\"SELECT grid_y FROM phy_tile ORDER BY grid_y", "active the IOB operates in the differential output mode. #", "# need, as the IOI_SING tiles have the same wire,", "rr_nodes for CHANX and CHANY from the database. IPIN and", "y_low = max(y_low, 1) else: assert False, node_type canonical_loc =", "pip_name = '{}.{}.{}'.format(tile_name, src_net, dest_net) else: pip_name = None switch_id", "idx, info FROM x_list;') x_list = [] for idx, info", "Note that in # VPR coords terms. \"above\" and \"below\"", "id_map[old_id] = idx idx += 1 del point_map[coord] return lambda", "import datetime import re import functools import pickle import sqlite3", "None assert vcc_loc is not None synth_tiles = { 'tiles':", ") continue m = PIN_NAME_TO_PARTS.match(pin_name) assert m is not None,", "get_pip_wire_names def get_number_graph_edges(conn, graph, node_mapping): num_edges = len(graph.edges) print('{} Counting", "despite they are found on the TOP or BOTTOM of", "= {} REBUF_SOURCES = {} def get_clk_hrow_and_rebuf_tiles_sorted(cur): \"\"\" Finds all", "side == tracks.Direction.RIGHT: assert right_graph_node_pkey is not None, (tile_type, pin_name)", "name LIKE \"CLK_BUFG_REBUF_R_CK_GCLK%_BOT\" OR name LIKE \"CLK_BUFG_REBUF_R_CK_GCLK%_TOP\" ), rebuf_nodes(node_pkey) AS", "'1' if is_bottom_sing else '0' # This is the value", "site_as_tile.site_pkey WHERE site_as_tile.pkey = ?\"\"\", (site_as_tile_pkey, ) ) results =", "= ? AND name = ? 
) AND site_pkey =", "graph.nodes[node_idx] = graph2.Node(**node_dict) def update_connection_box( conn, graph, graph_node_pkey, node_idx, connection_box_map", "PIN_NAME_TO_PARTS = re.compile(r'^([^\\.]+)\\.([^\\]]+)\\[0\\]$') def set_connection_box( graph, node_idx, grid_x, grid_y, box_id,", "XML. \"\"\" import argparse import os.path from hilbertcurve.hilbertcurve import HilbertCurve", "conn.cursor() cur.execute( \"\"\" SELECT connection_box_wire_pkey FROM graph_node WHERE pkey =", "track_pkey not in alive_tracks: continue cur2.execute( \"\"\" SELECT name FROM", "info FROM x_list;') x_list = [] for idx, info in", "def create_connection_boxes(conn, graph): \"\"\" Assign connection box ids for all", "CCIO_ACTIVE_REGEX = re.compile('HCLK_CMT_CCIO[0-9]+') HCLK_OUT = re.compile('CLK_HROW_CK_HCLK_OUT_([LR])([0-9]+)') IOI_OCLK = re.compile('IOI_OCLK_([01])') #", "phy_tile ON phy_tile.pkey = wire.phy_tile_pkey WHERE wire.wire_in_tile_pkey IN (SELECT wire_in_tile_pkey", "set_connection_box( graph, node_idx, grid_x, grid_y, box_id, site_pin_delay ): \"\"\" Assign", "{ 'wire': 'VCC', 'pad': 'VCC', 'port_type': 'VCC', 'is_clock': False, },", "get_tile_and_site_as_tile_pkey def create_get_site_as_tile_wire(cur): @functools.lru_cache(maxsize=0) def get_site_from_site_as_tile(site_as_tile_pkey): cur.execute( \"\"\" SELECT site.site_type_pkey,", "if needed. \"\"\" cur = conn.cursor() cur.execute( \"\"\" SELECT connection_box_wire_pkey", "= 2 p = math.ceil(math.log2(max(channels_obj.x_max, channels_obj.y_max))) point_map = {} for", "alive_tracks.add(track_pkey) print('{} Importing alive tracks'.format(now())) import_tracks(conn, alive_tracks, node_mapping, graph, segment_id)", "phy_tile.name, wire_in_tile.name FROM rebuf_nodes INNER JOIN wire ON wire.node_pkey =", "def create_channels(conn): cur = conn.cursor() cur.execute( \"\"\" SELECT chan_width_max, x_min,", "'GND', 'port_type': 'GND', 'is_clock': False, }, ], }, } }", ")\"\"\", (track_pkey, ) ) result = cur2.fetchone() if result is", "i, tile_name in enumerate(rebuf_and_hrow_tiles): if tile_name is not None and", "y_list=y_list, ) def create_connection_boxes(conn, graph): \"\"\" Assign connection box ids", ") (dest_net, ) = cur.fetchone() return (src_net, dest_net) return get_pip_wire_names", "rebuf_wire_name in cur: if node_pkey not in REBUF_NODES: REBUF_NODES[node_pkey] =", ") ) site_pin_delay = cur.fetchone()[0] set_connection_box( graph, node_idx, grid_x, grid_y,", "if node_type == graph2.NodeType.CHANX: direction = 'X' x_low = max(x_low,", "x_high=x_high, y_low=y_low, y_high=y_high, ) assert graph_node_pkey not in node_mapping node_mapping[graph_node_pkey]", "name LIKE \"CLK_BUFG_REBUF_R_CK_GCLK%_TOP\" ), rebuf_nodes(node_pkey) AS ( SELECT DISTINCT node_pkey", "connection_box_map ) elif side == tracks.Direction.BOTTOM: assert bottom_graph_node_pkey is not", "= synth_tile['tile_name'] wire = 'outpad' elif pin['port_type'] == 'output': tile_type", "> 1: pin_name = graph.create_pin_name_from_tile_type_sub_tile_num_and_pin( tile_type, (pin['z_loc'] - num_inpad), wire", "= switch_id else: switch_id = switch_name_map[switch_pkey] return switch_id def create_get_tile_name(conn):", "wire WHERE pkey = ? )\"\"\", (connection_box_wire_pkey, ) ) grid_x,", "block_types assert 'SYN-VCC' in block_types gnd_block_id = block_types['SYN-GND'] vcc_block_id =", ") # Mapping of graph_node.pkey to rr node id. 
node_mapping", "# Match site pins rr nodes with graph_node's in the", "(num_channels, ) = cur.fetchone() print('{} Import alive tracks'.format(now())) alive_tracks =", "connection_box_wire_pkey is not None: cur.execute( \"\"\" SELECT grid_x, grid_y FROM", "pip in ['IMUX', 'LOGIC_OUTS', 'CTRL', 'FAN', 'BYP']: if pip in", "graph, default_segment_id): cur = conn.cursor() cur2 = conn.cursor() for (graph_node_pkey,", "ON wire.node_pkey = rebuf_nodes.node_pkey INNER JOIN wire_in_tile ON wire_in_tile.pkey =", "option = list( tracks_model.get_tracks_for_wire_at_coord( tuple(synth_tile['loc']) ).values() ) assert len(option) >", "be enabled. Some pips imply other features. Example: .HCLK_LEAF_CLK_B_BOTL0.HCLK_CK_BUFHCLK10 implies:", "a pseudo-pip we simply reject fasm # features here. if", "{} for tile_pkey, site_as_tile_pkey, grid_x, grid_y in cur.execute(\"\"\" SELECT pkey,", "= (grid_loc.x, grid_loc.y) if vcc_block_id == grid_loc.block_type_id: assert vcc_loc is", "not None: REBUF_NODES[node_pkey].append( \"{}.CLK_HROW_R_CK_GCLK{}_ACTIVE\".format( hrow_tile, m.group(1) ) ) else: assert", "grid to specified dimensions in x_min,y_min,x_max,y_max', ) parser.add_argument( '--vpr_capnp_schema_dir', help='Directory", "not in node_mapping: continue if pip_pkey is not None: tile_name", "assert False, node_type canonical_loc = None cur2.execute( \"\"\" SELECT grid_x,", "Creating channels.'.format(now())) channels_obj = create_channels(conn) node_remap = create_node_remap(capnp_graph.graph.nodes, channels_obj) x_dim,", "cur = conn.cursor() cur.execute(\"\"\"SELECT count(*) FROM track;\"\"\") (num_channels, ) =", "penalty_cost, switch_type FROM switch;\"\"\"): # Add back missing switchs, which", "in rrgraph XML. # # TODO: This can be removed", "WHERE pkey = ?;\"\"\", (src_wire_in_tile_pkey, ) ) (src_net, ) =", "grid.tiles(): gridinfo = grid.gridinfo_at_tilename(tile) if gridinfo.tile_type not in ['CLK_HROW_BOT_R', 'CLK_HROW_TOP_R']:", "These connections are hard wires that connect IOB33M and IOB33S", "# TODO: This can be removed once # https://github.com/verilog-to-routing/vtr-verilog-to-routing/issues/354 #", "idx = 0 id_map = {} for h in range(hilbert_curve.max_h", "'--graph_limit', help='Limit grid to specified dimensions in x_min,y_min,x_max,y_max', ) parser.add_argument(", "gridinfo = grid.gridinfo_at_tilename(tile) if gridinfo.tile_type not in ['CLK_HROW_BOT_R', 'CLK_HROW_TOP_R']: continue", "yield (src_node, sink_node, switch_id, ()) else: yield (src_node, sink_node, switch_id,", "HCLK_CMT_TILES[(hclk_tile, lr)] return ['{}.HCLK_CMT_CK_BUFHCLK{}_USED'.format(hclk_cmt_tile, hclk_number)] def check_feature(feature): \"\"\" Check if", "ROI.'.format(now())) elif args.graph_limit: use_roi = True x_min, y_min, x_max, y_max", "cannot handle duplicate paths with the same switch id. print('{}", "== 'HCLK_CMT_L': HCLK_CMT_TILES[tile, 'R'] = grid.tilename_at_loc( (hclk_cmt_x, hclk_cmt_y) ) break", "feature_path[1]) if rebuf_key in REBUF_SOURCES: return ' '.join([feature] + REBUF_NODES[REBUF_SOURCES[rebuf_key]])", "delayless_switch = graph.get_switch_id('__vpr_delayless_switch__') for tile_name, synth_tile in synth_tiles['tiles'].items(): num_inpad =", "connection_database. 
print('{} Importing graph nodes'.format(now())) import_graph_nodes(conn, graph, node_mapping, connection_box_map) #", "tile return None # Assign each REBUF tile its above", "IOI_OLOGIC0_D1.IOI_IMUX34_1 src_value = '1' if is_bottom_sing else '0' # This", "map the PIP's feature to \"DIFF_OUT\" if feature_path[2] == \"IOB_DIFFO_OUT0\"", "the IOI tiles. # # The difference is that the", "get_site_from_site_as_tile(site_as_tile_pkey): cur.execute( \"\"\" SELECT site.site_type_pkey, site_as_tile.site_pkey FROM site_as_tile INNER JOIN", "import_tracks(conn, alive_tracks, node_mapping, graph, segment_id) print('original {} final {}'.format(num_channels, len(alive_tracks)))", "WHERE wire_in_tile_pkey IN (SELECT wire_in_tile_pkey FROM rebuf_wires) ) SELECT rebuf_nodes.node_pkey,", "= ? AND tile_pkey = ?;\"\"\", (wire_in_tile_pkey, tile_pkey) ) result", "ON site.pkey = site_as_tile.site_pkey WHERE site_as_tile.pkey = ?\"\"\", (site_as_tile_pkey, )", "= feature.split('.') wire_feature = feature_when_routed(parts[1]) if wire_feature is not None:", "the # second capture group is dived by 50 to", "as conn: populate_bufg_rebuf_map(conn) cur = conn.cursor() for name, internal_capacitance, drive_resistance,", "mode. # # Vivado does not report this connection as", "BUFG_CLK_IN_REGEX.fullmatch(feature_path[-1]): enable_feature = '{}.{}_ACTIVE'.format( feature_path[0], feature_path[-1] ) return ' '.join((feature,", "graph, top_graph_node_pkey, node_idx, connection_box_map ) elif side == tracks.Direction.BOTTOM: assert", "x_loc_str: x_loc = 1 else: assert False, \"Impossible to determine", "graph2.CanonicalLoc(x=result[0], y=result[1]) track = tracks.Track( direction=direction, x_low=x_low, x_high=x_high, y_low=y_low, y_high=y_high,", "elif args.graph_limit: use_roi = True x_min, y_min, x_max, y_max =", "re.compile('CLK_HROW_CK_HCLK_OUT_([LR])([0-9]+)') IOI_OCLK = re.compile('IOI_OCLK_([01])') # Regex for [LR]IOI_SING tiles IOI_SITE_PIPS", "in the prjxray db but there # is a tile-wide", "id=None, name=name, type=graph2.SwitchType[switch_type.upper()], timing=graph2.SwitchTiming( r=drive_resistance, c_in=0.0, c_out=0.0, c_internal=internal_capacitance, t_del=intrinsic_delay, p_cost=penalty_cost,", "y=result[1]) track = tracks.Track( direction=direction, x_low=x_low, x_high=x_high, y_low=y_low, y_high=y_high, )", "populate_hclk_cmt_tiles(db): global HCLK_CMT_TILES HCLK_CMT_TILES = {} grid = db.grid() _,", "IOI_OLOGIC0_D1.IOI_IMUX34_0 -> IOI_OLOGIC0_D1.IOI_IMUX34_1 src_value = '1' if is_bottom_sing else '0'", "None vcc_loc = (grid_loc.x, grid_loc.y) assert gnd_loc is not None", "{} def populate_hclk_cmt_tiles(db): global HCLK_CMT_TILES HCLK_CMT_TILES = {} grid =", "pin['port_type'] == 'GND': cur.execute('SELECT gnd_track_pkey FROM constant_sources') (track_pkey, ) =", "LIMIT 1;\") y_max = cur.fetchone()[0] return x_max + 1, y_max", "missing switchs, which were unused in arch xml, and so", ") features.append('{}.{}_USED'.format(feature_path[0], feature_path[-1])) return ' '.join(features) m = HCLK_OUT.fullmatch(feature_path[-1]) if", "== 1 wire_in_tile_pkey = results[0][0] tile_pkey, _ = get_tile_and_site_as_tile_pkey(gridloc[0], gridloc[1])", "the need of placing them, therefore, # when the relative", "wire_in_tile_pkey = cur.fetchone()[0] box_id = connection_box_map[wire_in_tile_pkey] cur.execute( \"\"\" SELECT switch.intrinsic_delay", "SELECT pkey FROM wire_in_tile WHERE name = ? 
AND phy_tile_type_pkey", "cur = conn.cursor() cur2 = conn.cursor() for (graph_node_pkey, track_pkey, graph_node_type,", "ORDER BY grid_y DESC LIMIT 1;\") y_max = cur.fetchone()[0] return", ") return ' '.join((feature, enable_buffer_feature)) # BUFHCE sites are now", "cur.execute( \"\"\" SELECT phy_tile.name, wire_in_tile.name FROM wire INNER JOIN phy_tile", "\"\"\"SELECT name FROM wire_in_tile WHERE pkey = ?;\"\"\", (src_wire_in_tile_pkey, )", "help='FPGA part') parser.add_argument( '--read_rr_graph', required=True, help='Input rr_graph file' ) parser.add_argument(", "REBUF_NODES[REBUF_SOURCES[rebuf_key]]) m = IOI_OCLK.fullmatch(feature_path[1]) if m: enable_oclkm_feature = '{}.IOI_OCLKM_{}.{}'.format( feature_path[0],", "IN (SELECT wire_in_tile_pkey FROM rebuf_wires) ) SELECT rebuf_nodes.node_pkey, phy_tile.name, wire_in_tile.name", "switch_name_map = {} print('{} Importing edges from database.'.format(now())) with progressbar_utils.ProgressBar(max_value=num_edges)", "del point_map[coord] return lambda x: id_map[x] def main(): parser =", "rebuf_key in REBUF_SOURCES: return ' '.join([feature] + REBUF_NODES[REBUF_SOURCES[rebuf_key]]) m =", "grid.tilename_at_loc( (hclk_cmt_x, hclk_cmt_y) ) break hclk_cmt_x = hclk_x while hclk_cmt_x", "{ 'wire': 'GND', 'pad': 'GND', 'port_type': 'GND', 'is_clock': False, },", "re.compile('([^0-9]+)[0-9]*') BUFG_CLK_IN_REGEX = re.compile('CLK_HROW_CK_IN_[LR][0-9]+') BUFG_CLK_OUT_REGEX = re.compile('CLK_HROW_R_CK_GCLK[0-9]+') CCIO_ACTIVE_REGEX = re.compile('HCLK_CMT_CCIO[0-9]+')", "'output': tile_type = synth_tile['tile_name'] wire = 'inpad' elif pin['port_type'] ==", "tile_type = 'SYN-VCC' wire = 'VCC' elif pin['port_type'] == 'GND':", "elif wire == 'outpad' and num_outpad > 1: pin_name =", "import argparse import os.path from hilbertcurve.hilbertcurve import HilbertCurve import math", "site_pin_delay = cur.fetchone()[0] set_connection_box( graph, node_idx, grid_x, grid_y, box_id, site_pin_delay", "(tile_type, pin_name) node_mapping[bottom_graph_node_pkey] = node.id update_connection_box( conn, graph, bottom_graph_node_pkey, node_idx,", "yield_nodes(nodes): with progressbar_utils.ProgressBar(max_value=len(nodes)) as bar: for idx, node in enumerate(nodes):", "track=track, segment_id=segment_id, ptc=ptc, timing=graph2.NodeTiming( r=resistance, c=capacitance, ), canonical_loc=canonical_loc ) def", "wire = 'GND' else: assert False, pin track_node = track_nodes[option[0]]", "wire_name)] = node_pkey HCLK_CMT_TILES = {} def populate_hclk_cmt_tiles(db): global HCLK_CMT_TILES", "graph_node_pkey not in node_mapping node_mapping[graph_node_pkey] = graph.add_track( track=track, segment_id=segment_id, ptc=ptc,", ") ) result = cur2.fetchone() if result: canonical_loc = graph2.CanonicalLoc(x=result[0],", "= conn.cursor() # Find CLK_HROW_TOP_R, CLK_HROW_TOP_R and REBUF tiles. 
rebuf_and_hrow_tiles", ") parser.add_argument('--part', required=True, help='FPGA part') parser.add_argument( '--read_rr_graph', required=True, help='Input rr_graph", "sets the CE signal to constant 1 m = CLK_HROW_CK_MUX_REGEX.fullmatch(feature_path[-1])", "== 'output': tile_type = synth_tile['tile_name'] wire = 'inpad' elif pin['port_type']", "return graph2.Channels( chan_width_max=chan_width_max, x_min=x_min, y_min=y_min, x_max=x_max, y_max=y_max, x_list=x_list, y_list=y_list, )", "else: assert False, \"Impossible to determine X location of BUFHCE\"", "' '.join([feature] + REBUF_NODES[REBUF_SOURCES[rebuf_key]]) m = IOI_OCLK.fullmatch(feature_path[1]) if m: enable_oclkm_feature", "), sizing=graph2.SwitchSizing( mux_trans_size=0, buf_size=0, ), ) ) # Mapping of", "in # VPR coords terms. \"above\" and \"below\" mean the", "with the top half of a normal IOI TILE. #", "get_tile_name(phy_tile_pkey) src_net, dest_net = get_pip_wire_names(pip_pkey) if not backward: pip_name =", "ORDER BY grid_x DESC LIMIT 1;\") x_max = cur.fetchone()[0] cur.execute(\"SELECT", "@functools.lru_cache(maxsize=0) def get_site_from_site_as_tile(site_as_tile_pkey): cur.execute( \"\"\" SELECT site.site_type_pkey, site_as_tile.site_pkey FROM site_as_tile", "in enumerate(rebuf_and_hrow_tiles): if tile_name is not None and tile_name.startswith(\"CLK_BUFG_REBUF\"): rebuf_to_hrow_map[tile_name]", "def get_site_from_site_as_tile(site_as_tile_pkey): cur.execute( \"\"\" SELECT site.site_type_pkey, site_as_tile.site_pkey FROM site_as_tile INNER", "= ?\"\"\", (site_as_tile_pkey, ) ) results = cur.fetchall() assert len(results)", "x_min=x_min, y_min=y_min, x_max=x_max, y_max=y_max, x_list=x_list, y_list=y_list, ) def create_connection_boxes(conn, graph):", "not in REBUF_NODES: REBUF_NODES[node_pkey] = [] m = rebuf_wire_regexp.fullmatch(rebuf_wire_name) if", "connected to VCC. # This sets the CE signal to", "num_inpad), wire ) else: pin_name = graph.create_pin_name_from_tile_type_and_pin( tile_type, wire )", "graph for the SYN-GND and SYN-VCC tiles. 
\"\"\" block_types =", "if (hclk_tile, lr) not in HCLK_CMT_TILES: return [] hclk_cmt_tile =", "'input': graph.add_edge( src_node=node_mapping[track_node], sink_node=pin_node[0][0], switch_id=delayless_switch, name='synth_{}_{}'.format(tile_name, pin['wire']), ) elif pin['port_type']", "# The IOI_SING are found on top or bottom of", "_ = get_tile_and_site_as_tile_pkey(gridloc[0], gridloc[1]) cur.execute( \"\"\" SELECT top_graph_node_pkey, bottom_graph_node_pkey, left_graph_node_pkey,", ") for node_pkey, rebuf_tile, rebuf_wire_name in cur: if node_pkey not", "= math.ceil(math.log2(max(channels_obj.x_max, channels_obj.y_max))) point_map = {} for node in nodes:", "no feature assosciated with that PIP in the prjxray db", "get_pip_wire_names(pip_pkey): cur.execute( \"\"\"SELECT src_wire_in_tile_pkey, dest_wire_in_tile_pkey FROM pip_in_tile WHERE pkey =", ") else: pin_name = graph.create_pin_name_from_tile_type_and_pin( tile_type, wire ) pin_node =", "if 'L' in x_loc_str: x_loc = 0 elif 'R' in", "if pin['port_type'] in ['input', 'output']: wire_pkey = get_wire_pkey(conn, tile_name, pin['wire'])", "on the TOP or BOTTOM of an IOI column m", "phy_tile_pkey, pip_pkey, backward) in enumerate(cur.execute(\"\"\" SELECT src_graph_node_pkey, dest_graph_node_pkey, switch_pkey, phy_tile_pkey,", "graph): \"\"\" Assign connection box ids for all connection box", "database graph_nodes and IPIN, OPIN, CHANX and CHANY rr_node ids", "vcc_loc = (grid_loc.x, grid_loc.y) assert gnd_loc is not None assert", "shares bits with the top half of a normal IOI", "return '{}.OUT_DIFF'.format(feature_path[0]) # IOB_PADOUT0->IOB_DIFFI_IN1 # IOB_PADOUT1->IOB_DIFFI_IN0 # # These connections", "in REBUF_NODES: cur.execute( \"\"\" SELECT phy_tile.name, wire_in_tile.name FROM wire INNER", "box REBUF_NODES = {} REBUF_SOURCES = {} def get_clk_hrow_and_rebuf_tiles_sorted(cur): \"\"\"", "features are: # - IN_USE: to enable the BUFHCE site", "update_connection_box( conn, graph, right_graph_node_pkey, node_idx, connection_box_map ) elif side ==", "input mode. # # Vivado does not report this connection", "cur2.execute( \"\"\" SELECT grid_x, grid_y FROM phy_tile WHERE pkey =", "cur.fetchone()[0] cur.execute(\"SELECT grid_y FROM phy_tile ORDER BY grid_y DESC LIMIT", "SELECT switch.intrinsic_delay FROM switch WHERE pkey = ( SELECT site_pin_switch_pkey", "dived by 50 to get the relative # position of", "# IOB_DIFFO_OUT0->IOB_DIFFO_IN1 # # When this PIP is active the", "for wire_in_tile_pkey, tile_type_pkey, wire_name in cur: connection_box_map[wire_in_tile_pkey] = graph.maybe_add_connection_box( reduce_connection_box(wire_name)", "node_mapping): num_edges = len(graph.edges) print('{} Counting edges.'.format(now())) cur = conn.cursor()", "pin): site_type_pkey, site_pkey = get_site_from_site_as_tile( site_as_tile_pkey ) cur.execute( \"\"\" SELECT", "the rr_graph. Add rr_edge for each row in the graph_edge", "row in the graph_edge table. Import channel XML node from", "attach to the source pip name that # changes based", "blocks are handled below. 
if pin_name.startswith('SYN-'): set_connection_box( graph, node_idx, node.loc.x_low,", "BY grid_x DESC LIMIT 1;\") x_max = cur.fetchone()[0] cur.execute(\"SELECT grid_y", "re.compile('HCLK_CMT_CCIO[0-9]+') HCLK_OUT = re.compile('CLK_HROW_CK_HCLK_OUT_([LR])([0-9]+)') IOI_OCLK = re.compile('IOI_OCLK_([01])') # Regex for", "node_idx, grid_x, grid_y, box_id, site_pin_delay ): \"\"\" Assign a connection", "= max(y_low, 1) else: assert False, node_type canonical_loc = None", "'R' in x_loc_str: x_loc = 1 else: assert False, \"Impossible", "# Add back missing switchs, which were unused in arch", "print('{} Importing existing edges.'.format(now())) for edge in graph.edges: yield (edge.src_node,", "= set() for (track_pkey, ) in cur.execute(\"SELECT pkey FROM track", "input rr graph for the SYN-GND and SYN-VCC tiles. \"\"\"", ") site_pin_delay = cur.fetchone()[0] set_connection_box( graph, node_idx, grid_x, grid_y, box_id,", "HCLK_CMT_TILES = {} grid = db.grid() _, x_max, _, _", "feature_path[-1] ) return ' '.join((feature, enable_feature)) if BUFG_CLK_OUT_REGEX.fullmatch(feature_path[-1]): enable_feature =", "set_connection_box( graph, node_idx, grid_x, grid_y, box_id, site_pin_delay ) def create_get_tile_and_site_as_tile_pkey(cur):", "return tile return None # Assign each REBUF tile its", "None) # Then yield edges from database. cur = conn.cursor()", "bottom_graph_node_pkey, node_idx, connection_box_map ) else: assert False, side def import_tracks(conn,", "segment_id = default_segment_id node_type = graph2.NodeType(graph_node_type) if node_type == graph2.NodeType.CHANX:", "tile;\"\"\"): tiles[(grid_x, grid_y)] = (tile_pkey, site_as_tile_pkey) def get_tile_and_site_as_tile_pkey(x, y): return", ") else: yield (src_node, sink_node, switch_id, ()) else: yield (src_node,", "as bar: for idx, (src_graph_node, dest_graph_node, switch_pkey, phy_tile_pkey, pip_pkey, backward)", "cur.execute('SELECT gnd_track_pkey FROM constant_sources') (track_pkey, ) = cur.fetchone() else: assert", "WHERE pkey = ?;\"\"\", (pip_pkey, ) ) src_wire_in_tile_pkey, dest_wire_in_tile_pkey =", "None gnd_loc = (grid_loc.x, grid_loc.y) if vcc_block_id == grid_loc.block_type_id: assert", "= argparse.ArgumentParser() parser.add_argument( '--db_root', required=True, help='Project X-Ray Database' ) parser.add_argument('--part',", "= {} for wire_in_tile_pkey, tile_type_pkey, wire_name in cur: connection_box_map[wire_in_tile_pkey] =", "def create_get_tile_and_site_as_tile_pkey(cur): tiles = {} for tile_pkey, site_as_tile_pkey, grid_x, grid_y", "x2=synth_tiles['info']['GRID_X_MAX'], y2=synth_tiles['info']['GRID_Y_MAX'], ) print('{} generating routing graph for ROI.'.format(now())) elif", "pin_name) node_mapping[top_graph_node_pkey] = node.id update_connection_box( conn, graph, top_graph_node_pkey, node_idx, connection_box_map", "bar: for idx, node in enumerate(nodes): yield node if idx", "get_tile_and_site_as_tile_pkey(x, y): return tiles[(x, y)] return get_tile_and_site_as_tile_pkey def create_get_site_as_tile_wire(cur): @functools.lru_cache(maxsize=0)", ") (switch_name, ) = cur.fetchone() switch_id = graph.get_switch_id(switch_name) switch_name_map[switch_pkey] =", "pkey, site_as_tile_pkey, grid_x, grid_y FROM tile;\"\"\"): tiles[(grid_x, grid_y)] = (tile_pkey,", "False, pin def get_switch_name(conn, graph, switch_name_map, switch_pkey): assert switch_pkey is", "= False roi = None synth_tiles = None capnp_graph =", "DISTINCT wire_in_tile_pkey FROM wire WHERE pkey IN ( SELECT connection_box_wire_pkey", "conn, graph, left_graph_node_pkey, 
node_idx, connection_box_map ) elif side == tracks.Direction.RIGHT:", "pin['port_type'] in ['VCC', 'GND', 'output']: graph.add_edge( src_node=pin_node[0][0], sink_node=node_mapping[track_node], switch_id=delayless_switch, name='synth_{}_{}'.format(tile_name,", "datetime import re import functools import pickle import sqlite3 now", "x=grid_x, y=grid_y, id=box_id, site_pin_delay=site_pin_delay, ) graph.nodes[node_idx] = graph2.Node(**node_dict) def update_connection_box(", "y_min=y_min, x_max=x_max, y_max=y_max, x_list=x_list, y_list=y_list, ) def create_connection_boxes(conn, graph): \"\"\"", "and tile_name.startswith(\"CLK_BUFG_REBUF\"): rebuf_to_hrow_map[tile_name] = { \"above\": maybe_get_clk_hrow(i - 1), \"below\":", "cur: connection_box_map[wire_in_tile_pkey] = graph.maybe_add_connection_box( reduce_connection_box(wire_name) ) return connection_box_map def yield_nodes(nodes):", "idx, (src_graph_node, dest_graph_node, switch_pkey, phy_tile_pkey, pip_pkey, backward) in enumerate(cur.execute(\"\"\" SELECT", "# https://github.com/verilog-to-routing/vtr-verilog-to-routing/issues/354 # is fixed. try: graph.get_switch_id(name) continue except KeyError:", "continue except KeyError: capnp_graph.add_switch( graph2.Switch( id=None, name=name, type=graph2.SwitchType[switch_type.upper()], timing=graph2.SwitchTiming( r=drive_resistance,", ")\"\"\", (track_pkey, ) ) result = cur2.fetchone() if result: canonical_loc", "= {} print('{} Importing edges from database.'.format(now())) with progressbar_utils.ProgressBar(max_value=num_edges) as", "import lib.rr_graph_capnp.graph2 as capnp_graph2 from prjxray_constant_site_pins import feature_when_routed from prjxray_tile_import", ") def create_get_tile_and_site_as_tile_pkey(cur): tiles = {} for tile_pkey, site_as_tile_pkey, grid_x,", "site_as_tile_pkey, grid_x, grid_y FROM tile;\"\"\"): tiles[(grid_x, grid_y)] = (tile_pkey, site_as_tile_pkey)", "{} REBUF_SOURCES = {} def get_clk_hrow_and_rebuf_tiles_sorted(cur): \"\"\" Finds all CLK_HROW_TOP_R,", "@functools.lru_cache(maxsize=0) def get_site_as_tile_wire(site_as_tile_pkey, pin): site_type_pkey, site_pkey = get_site_from_site_as_tile( site_as_tile_pkey )", "# IOI/IOB column. The Y coordinate identified with the #", "on CE input which is connected to VCC. # This", "== 'outpad' and num_outpad > 1: pin_name = graph.create_pin_name_from_tile_type_sub_tile_num_and_pin( tile_type,", "in ['VCC', 'GND', 'output']: graph.add_edge( src_node=pin_node[0][0], sink_node=node_mapping[track_node], switch_id=delayless_switch, name='synth_{}_{}'.format(tile_name, pin['wire']),", "else: assert False, pin def get_switch_name(conn, graph, switch_name_map, switch_pkey): assert", "WHERE wire.node_pkey = ?;\"\"\", (node_pkey, ) ) for tile, wire_name", "with the IOI tiles. # # The difference is that", "nodes: x = node.loc.x_low y = node.loc.y_low if (x, y)", "ends of the list to simplify the code below. rebuf_and_hrow_tiles", "its there on the list. \"\"\" tile = rebuf_and_hrow_tiles[i] if", "pkey = ?; \"\"\", (tile_pkey, ) ) return cur.fetchone()[0] return", "cur = conn.cursor() cur.execute( \"\"\" SELECT pkey, tile_type_pkey, name FROM", "not in node_mapping: continue num_edges += 1 return num_edges def", "block_types = {} for block_type in graph.block_types: block_types[block_type.name] = block_type.id", "connection_box_map) # Walk all track graph nodes and add them.", "the differential output mode. 
# There is no feature assosciated", "(src_graph_node, dest_graph_node, switch_pkey, phy_tile_pkey, pip_pkey, backward) in enumerate(cur.execute(\"\"\" SELECT src_graph_node_pkey,", "switch_id, (('fasm_features', feature), ) ) else: yield (src_node, sink_node, switch_id,", "break def find_hclk_cmt_hclk_feature(hclk_tile, lr, hclk_number): if (hclk_tile, lr) not in", "m.group(1), m.group(2) ) ) m = CASCOUT_REGEX.fullmatch(feature_path[-2]) if m: enable_cascout", "tile_type, wire ) pin_node = graph.get_nodes_for_pin( tuple(synth_tile['loc']), pin_name ) if", "get_number_graph_edges(conn, graph, node_mapping): num_edges = len(graph.edges) print('{} Counting edges.'.format(now())) cur", "or BOTTOM of an IOI column m = IOI_SING_REGEX.fullmatch(feature) if", "cur = conn.cursor() @functools.lru_cache(maxsize=None) def get_pip_wire_names(pip_pkey): cur.execute( \"\"\"SELECT src_wire_in_tile_pkey, dest_wire_in_tile_pkey", "their Y coordinates. \"\"\" cur.execute( \"\"\" SELECT name FROM phy_tile", "in differential input mode. # # Vivado does not report", "x_max=x_max, y_max=y_max, x_list=x_list, y_list=y_list, ) def create_connection_boxes(conn, graph): \"\"\" Assign", "'GND': cur.execute('SELECT gnd_track_pkey FROM constant_sources') (track_pkey, ) = cur.fetchone() else:", "reject fasm # features here. if feature_path[2] == \"IOB_PADOUT0\" and", "? ;\"\"\", (site_type_pkey, pin, site_pkey) ) results = cur.fetchall() assert", "= ? )\"\"\", (track_pkey, ) ) result = cur2.fetchone() if", "feature_path[-1] ) return ' '.join((feature, enable_buffer_feature)) # BUFHCE sites are", "= node.id update_connection_box( conn, graph, bottom_graph_node_pkey, node_idx, connection_box_map ) else:", "'IMUX' >>> reduce_connection_box('IMUX10') 'IMUX' >>> reduce_connection_box('BRAM_ADDR') 'IMUX' >>> reduce_connection_box('A_L10') 'A'", "datetime.datetime.now HCLK_CK_BUFHCLK_REGEX = re.compile('HCLK_CK_BUFHCLK[0-9]+') CLK_HROW_CK_MUX_REGEX = re.compile('CLK_HROW_CK_MUX_OUT_([LR])([0-9]+)') CASCOUT_REGEX = re.compile('BRAM_CASCOUT_ADDR((?:BWR)|(?:ARD))ADDRU([0-9]+)')", "= 'GND' else: assert False, pin track_node = track_nodes[option[0]] assert", "<reponame>FireFox317/symbiflow-arch-defs<filename>xc/common/utils/prjxray_routing_import.py #!/usr/bin/env python3 \"\"\" Imports 7-series routing fabric to the", "connection_box_obj=connection_box_obj, num_nodes=len(capnp_graph.graph.nodes), nodes_obj=yield_nodes(capnp_graph.graph.nodes), num_edges=num_edges, edges_obj=import_graph_edges(conn, graph, node_mapping), node_remap=node_remap, ) for", "tiles have the same wire, and pip names # despite", "= int(m.group(2)) % 50 == 0 # This is the", "edges have been sent to # VPR. VPR cannot handle", "tile only if its there on the list. \"\"\" tile", "conn, graph, right_graph_node_pkey, node_idx, connection_box_map ) elif side == tracks.Direction.TOP:", "if 'BRAM_ADDR' in box: box = 'IMUX' if box.endswith('_L'): box", "= ?; \"\"\", (tile_pkey, ) ) return cur.fetchone()[0] return get_tile_name", "connection_box_map = create_connection_boxes(conn, graph) # Match site pins rr nodes", "pkey = ( SELECT node_pkey FROM wire WHERE pkey =", "on both ends of the list to simplify the code", "== grid_loc.block_type_id: assert gnd_loc is None gnd_loc = (grid_loc.x, grid_loc.y)", "= re.compile(r'^([^\\.]+)\\.([^\\]]+)\\[0\\]$') def set_connection_box( graph, node_idx, grid_x, grid_y, box_id, site_pin_delay", "CLK_HROW tile. Note that in # VPR coords terms. 
\"above\"", "num_inpad = len( list( filter( lambda t: t['port_type'] == 'output',", "= conn.cursor() cur.execute( \"\"\"SELECT name FROM switch WHERE pkey =", "return '' if feature_path[2] == \"IOB_PADOUT1\" and feature_path[1] == \"IOB_DIFFI_IN0\":", "\"\"\" SELECT name FROM phy_tile WHERE pkey = ?; \"\"\",", "continue if dest_graph_node not in node_mapping: continue num_edges += 1", "arches should have these synthetic tiles, search the input rr", "\"CLK_BUFG_REBUF_R_CK_GCLK%_TOP\" ), rebuf_nodes(node_pkey) AS ( SELECT DISTINCT node_pkey FROM wire", "the PIP's feature to \"DIFF_OUT\" if feature_path[2] == \"IOB_DIFFO_OUT0\" and", "SELECT pkey FROM wire_in_tile WHERE site_pin_pkey = ( SELECT pkey", "y_low=y_low, y_high=y_high, ) assert graph_node_pkey not in node_mapping node_mapping[graph_node_pkey] =", "switch_id else: switch_id = switch_name_map[switch_pkey] return switch_id def create_get_tile_name(conn): cur", "@functools.lru_cache(maxsize=None) def get_pip_wire_names(pip_pkey): cur.execute( \"\"\"SELECT src_wire_in_tile_pkey, dest_wire_in_tile_pkey FROM pip_in_tile WHERE", "bar.update(idx) def phy_grid_dims(conn): \"\"\" Returns physical grid dimensions. \"\"\" cur", "grid dimensions. \"\"\" cur = conn.cursor() cur.execute(\"SELECT grid_x FROM phy_tile", "'loc': vcc_loc, 'pins': [ { 'wire': 'VCC', 'pad': 'VCC', 'port_type':", "sent to # VPR. VPR cannot handle duplicate paths with", "LIKE \"CLK_HROW_TOP_R_%\" OR name LIKE \"CLK_BUFG_REBUF_%\" ORDER BY grid_y DESC;", "LIKE \"CLK_BUFG_REBUF_%\" ORDER BY grid_y DESC; \"\"\" ) return [t[0]", ") return ' '.join((feature, enable_feature)) if CCIO_ACTIVE_REGEX.fullmatch(feature_path[-1]): features = [feature]", "[ { 'wire': 'GND', 'pad': 'GND', 'port_type': 'GND', 'is_clock': False,", "{} print('{} Creating connection box list'.format(now())) connection_box_map = create_connection_boxes(conn, graph)", "INNER JOIN site ON site.pkey = site_as_tile.site_pkey WHERE site_as_tile.pkey =", "edges_obj=import_graph_edges(conn, graph, node_mapping), node_remap=node_remap, ) for k in node_mapping: node_mapping[k]", "box list'.format(now())) connection_box_map = create_connection_boxes(conn, graph) # Match site pins", "in the hierarchy (its tile-wide, not site-wide). So here we", "'is_clock': False, }, ], }, } } return synth_tiles def", "cur.execute( \"\"\" SELECT top_graph_node_pkey, bottom_graph_node_pkey, left_graph_node_pkey, right_graph_node_pkey FROM wire WHERE", "second capture group is dived by 50 to get the", "SELECT name FROM phy_tile WHERE pkey = ?; \"\"\", (tile_pkey,", "tiles[(grid_x, grid_y)] = (tile_pkey, site_as_tile_pkey) def get_tile_and_site_as_tile_pkey(x, y): return tiles[(x,", "alive_tracks, node_mapping, graph, segment_id) print('original {} final {}'.format(num_channels, len(alive_tracks))) def", "in node_mapping, (track_node, track_pkey) if wire == 'inpad' and num_inpad", "= graph.maybe_add_connection_box( reduce_connection_box(wire_name) ) return connection_box_map def yield_nodes(nodes): with progressbar_utils.ProgressBar(max_value=len(nodes))", "Mapping of graph_node.pkey to rr node id. node_mapping = {}", "return ' '.join((feature, enable_cascout)) parts = feature.split('.') wire_feature = feature_when_routed(parts[1])", "the value to attach to the source pip name that", "hclk_cmt_y = hclk_y while hclk_cmt_x > 0: hclk_cmt_x -= 1", "connection box ids for all connection box types. 
\"\"\" cur", "= \"{}{}\".format(unchanged_feature, changed_feature) feature_path = feature.split('.') # IOB_DIFFO_OUT0->IOB_DIFFO_IN1 # #", "import_tracks(conn, alive_tracks, node_mapping, graph, default_segment_id): cur = conn.cursor() cur2 =", "SELECT phy_tile_pkey FROM tile_map WHERE tile_pkey = ? ) );\"\"\",", "block_type in graph.block_types: block_types[block_type.name] = block_type.id assert 'SYN-GND' in block_types", "\"VCC\": { 'loc': vcc_loc, 'pins': [ { 'wire': 'VCC', 'pad':", "continue for old_id in point_map[coord]: id_map[old_id] = idx idx +=", "get_tile_and_site_as_tile_pkey(gridloc[0], gridloc[1]) cur.execute( \"\"\" SELECT top_graph_node_pkey, bottom_graph_node_pkey, left_graph_node_pkey, right_graph_node_pkey FROM", "cur.fetchone() cur.execute( \"\"\"SELECT name FROM wire_in_tile WHERE pkey = ?;\"\"\",", "Writing node map.'.format(now())) with open(args.write_rr_node_map, 'wb') as f: pickle.dump(node_mapping, f)", "x_loc_str = m.group(1) if 'L' in x_loc_str: x_loc = 0", "cur = conn.cursor() cur.execute( \"\"\" SELECT connection_box_wire_pkey FROM graph_node WHERE", "prjxray.roi import Roi import prjxray.grid as grid from lib.rr_graph import", "wire = 'outpad' elif pin['port_type'] == 'output': tile_type = synth_tile['tile_name']", "is not None synth_tiles = { 'tiles': { \"VCC\": {", "channels_obj = create_channels(conn) node_remap = create_node_remap(capnp_graph.graph.nodes, channels_obj) x_dim, y_dim =", "pin['wire']), ) elif pin['port_type'] in ['VCC', 'GND', 'output']: graph.add_edge( src_node=pin_node[0][0],", "node_pkey HCLK_CMT_TILES = {} def populate_hclk_cmt_tiles(db): global HCLK_CMT_TILES HCLK_CMT_TILES =", "NULL ) );\"\"\" ) connection_box_map = {} for wire_in_tile_pkey, tile_type_pkey,", "node in nodes: x = node.loc.x_low y = node.loc.y_low if", "cur.fetchone()[0] return get_tile_name def create_get_pip_wire_names(conn): cur = conn.cursor() @functools.lru_cache(maxsize=None) def", "cur.execute('SELECT idx, info FROM y_list;') y_list = [] for idx,", "SELECT site_pin_switch_pkey FROM wire_in_tile WHERE pkey = ( SELECT wire_in_tile_pkey", "sink_node=node_mapping[track_node], switch_id=delayless_switch, name='synth_{}_{}'.format(tile_name, pin['wire']), ) else: assert False, pin def", "name that # changes based on which IOI_SING is selected", "phy_tile WHERE pkey IN ( SELECT phy_tile_pkey FROM tile_map WHERE", "idx, info in cur: x_list.append(graph2.ChannelList(idx, info)) cur.execute('SELECT idx, info FROM", "constant_sources') (track_pkey, ) = cur.fetchone() elif pin['port_type'] == 'GND': cur.execute('SELECT", "segment_name = result[0] segment_id = graph.get_segment_id_from_name(segment_name) else: segment_id = default_segment_id", "= phy_grid_dims(conn) connection_box_obj = graph.create_connection_box_object( x_dim=x_dim, y_dim=y_dim ) num_edges =", "-= 1 gridinfo = grid.gridinfo_at_loc((hclk_cmt_x, hclk_cmt_y)) if gridinfo.tile_type == 'HCLK_CMT':", "N) idx = 0 id_map = {} for h in", "tile_name, synth_tile in synth_tiles['tiles'].items(): num_inpad = len( list( filter( lambda", "import tracks from lib.connection_database import get_wire_pkey, get_track_model import lib.rr_graph_capnp.graph2 as", "= ?;\"\"\", (src_wire_in_tile_pkey, ) ) (src_net, ) = cur.fetchone() cur.execute(", "with the same switch id. 
print('{} Adding synthetic edges'.format(now())) add_synthetic_edges(conn,", "= cur.fetchone() get_tile_name = create_get_tile_name(conn) get_pip_wire_names = create_get_pip_wire_names(conn) switch_name_map =", "- 1), \"below\": maybe_get_clk_hrow(i + 1), } # Find nodes", "within the clock region column is_bottom_sing = int(m.group(2)) % 50", "'.join( [feature] + find_hclk_cmt_hclk_feature( feature_path[0], m.group(1), m.group(2) ) ) m", "WHERE pkey = ? )\"\"\", (track_pkey, ) ) result =", "HilbertCurve(p, N) idx = 0 id_map = {} for h", "y_high, ptc, capacitance, resistance) in progressbar_utils.progressbar(cur.execute(\"\"\" SELECT pkey, track_pkey, graph_node_type,", "IOI_SING_REGEX = re.compile( r'([RL]IOI3_SING_X[0-9]+Y)([0-9]+)(\\.IOI_)({})([01])(.*)'.format( \"|\".join(IOI_SITE_PIPS) ) ) def reduce_connection_box(box): \"\"\"", "if is_bottom_sing else '0' # This is the value to", "NOT NULL;\"\"\")): if track_pkey not in alive_tracks: continue cur2.execute( \"\"\"", "FROM wire WHERE pkey = ? )\"\"\", (connection_box_wire_pkey, ) )", "= [None] + rebuf_and_hrow_tiles + [None] def maybe_get_clk_hrow(i): \"\"\" Returns", "\"CLK_HROW_BOT_R_%\" OR name LIKE \"CLK_HROW_TOP_R_%\" OR name LIKE \"CLK_BUFG_REBUF_%\" ORDER", "CLK_HROW_TOP_R and REBUF tiles. rebuf_and_hrow_tiles = get_clk_hrow_and_rebuf_tiles_sorted(cur) # Append None", "site_pin_delay=0., ) continue m = PIN_NAME_TO_PARTS.match(pin_name) assert m is not", "site_pin_delay ): \"\"\" Assign a connection box to an IPIN", "= synth_tile['tile_name'] wire = 'inpad' elif pin['port_type'] == 'VCC': tile_type", "result[0] segment_id = graph.get_segment_id_from_name(segment_name) else: segment_id = default_segment_id node_type =", "== 'HCLK_CMT': HCLK_CMT_TILES[tile, 'L'] = grid.tilename_at_loc( (hclk_cmt_x, hclk_cmt_y) ) break", "== 0 # This is the value to attach to", "if gnd_block_id == grid_loc.block_type_id: assert gnd_loc is None gnd_loc =", "is not None, pin_name tile_type = m.group(1) tile_type = remove_vpr_tile_prefix(tile_type)", "grid_y FROM phy_tile ORDER BY grid_y DESC LIMIT 1;\") y_max", "'{}.{}_ACTIVE'.format(feature_path[0], feature_path[-1]) ) features.append('{}.{}_USED'.format(feature_path[0], feature_path[-1])) return ' '.join(features) m =", "does not report this connection as a PIP but in", "elif pin['port_type'] == 'output': tile_type = synth_tile['tile_name'] wire = 'inpad'", "For ROI configurations, this also connects the synthetic IO tiles", "the number of connection boxes by merging some. Examples: >>>", "['CLK_HROW_BOT_R', 'CLK_HROW_TOP_R']: continue hclk_x, hclk_y = grid.loc_of_tilename(tile) hclk_cmt_x = hclk_x", ") wire_in_tile_pkey = cur.fetchone()[0] box_id = connection_box_map[wire_in_tile_pkey] cur.execute( \"\"\" SELECT", "+ 1), } # Find nodes touching rebuf wires. 
cur.execute(", "if node_pkey not in REBUF_NODES: REBUF_NODES[node_pkey] = [] m =", "Instead of making it a pseudo-pip we simply reject fasm", "synth_tiles) print('{} Creating channels.'.format(now())) channels_obj = create_channels(conn) node_remap = create_node_remap(capnp_graph.graph.nodes,", "conn.cursor() cur.execute( \"\"\" SELECT chan_width_max, x_min, x_max, y_min, y_max FROM", "= block_types['SYN-GND'] vcc_block_id = block_types['SYN-VCC'] gnd_loc = None vcc_loc =", "graph, switch_name_map, switch_pkey ) src_node = node_mapping[src_graph_node] sink_node = node_mapping[dest_graph_node]", "IOI_OCLK.fullmatch(feature_path[1]) if m: enable_oclkm_feature = '{}.IOI_OCLKM_{}.{}'.format( feature_path[0], m.group(1), feature_path[-1] )", "pip_name = None switch_id = get_switch_name( conn, graph, switch_name_map, switch_pkey", "switch_id, ()) if idx % 1024 == 0: bar.update(idx) def", "None on both ends of the list to simplify the", "input which is connected to VCC. # This sets the", "x_loc = 1 else: assert False, \"Impossible to determine X", "= grid.dims() for tile in grid.tiles(): gridinfo = grid.gridinfo_at_tilename(tile) if", "WHERE pkey = ( SELECT site_wire_pkey FROM node WHERE pkey", "# # When this PIP is active the IOB operates", "'output']: graph.add_edge( src_node=pin_node[0][0], sink_node=node_mapping[track_node], switch_id=delayless_switch, name='synth_{}_{}'.format(tile_name, pin['wire']), ) else: assert", "file' ) parser.add_argument( '--write_rr_node_map', required=True, help='Output map of graph_node_pkey to", "with that PIP in the prjxray db but there #", "# This sets the CE signal to constant 1 m", "(src_net, ) = cur.fetchone() cur.execute( \"\"\"SELECT name FROM wire_in_tile WHERE", "BUFHCE site # - ZINV_CE: to disable the inverter on", "of making it a pseudo-pip we simply reject fasm #", "graph, node_mapping, connection_box_map): cur = conn.cursor() get_tile_and_site_as_tile_pkey = create_get_tile_and_site_as_tile_pkey(cur) get_site_as_tile_wire", "side = node.loc.side if side == tracks.Direction.LEFT: assert left_graph_node_pkey is", ") ) (src_net, ) = cur.fetchone() cur.execute( \"\"\"SELECT name FROM", "? )\"\"\", (track_pkey, ) ) result = cur2.fetchone() if result:", "wire_in_tile_pkey FROM wire WHERE pkey = ( SELECT site_wire_pkey FROM", ") if pin['port_type'] == 'input': graph.add_edge( src_node=node_mapping[track_node], sink_node=pin_node[0][0], switch_id=delayless_switch, name='synth_{}_{}'.format(tile_name,", "not in node_mapping: continue if dest_graph_node not in node_mapping: continue", "find_hclk_cmt_hclk_feature(hclk_tile, lr, hclk_number): if (hclk_tile, lr) not in HCLK_CMT_TILES: return", "is not None: feature = check_feature(pip_name) if feature: yield (", "'loc': gnd_loc, 'pins': [ { 'wire': 'GND', 'pad': 'GND', 'port_type':", "if connection_box_wire_pkey is not None: cur.execute( \"\"\" SELECT grid_x, grid_y", "level up in the hierarchy (its tile-wide, not site-wide). 
So", "= datetime.datetime.now HCLK_CK_BUFHCLK_REGEX = re.compile('HCLK_CK_BUFHCLK[0-9]+') CLK_HROW_CK_MUX_REGEX = re.compile('CLK_HROW_CK_MUX_OUT_([LR])([0-9]+)') CASCOUT_REGEX =", "json from lib import progressbar_utils import datetime import re import", "math import prjxray.db from prjxray.roi import Roi import prjxray.grid as", "Add back missing switchs, which were unused in arch xml,", "cur2 = conn.cursor() for (graph_node_pkey, track_pkey, graph_node_type, x_low, x_high, y_low,", "IOI_SITE_PIPS names # in the destination wire of the pip", "len(results) == 1 wire_in_tile_pkey = results[0][0] return wire_in_tile_pkey return get_site_as_tile_wire", "WHERE name LIKE \"CLK_BUFG_REBUF_R_CK_GCLK%_BOT\" OR name LIKE \"CLK_BUFG_REBUF_R_CK_GCLK%_TOP\" ), rebuf_nodes(node_pkey)", "CLK_HROW_CK_MUX_REGEX = re.compile('CLK_HROW_CK_MUX_OUT_([LR])([0-9]+)') CASCOUT_REGEX = re.compile('BRAM_CASCOUT_ADDR((?:BWR)|(?:ARD))ADDRU([0-9]+)') CONNECTION_BOX_FILTER = re.compile('([^0-9]+)[0-9]*') BUFG_CLK_IN_REGEX", "top_graph_node_pkey, node_idx, connection_box_map ) elif side == tracks.Direction.BOTTOM: assert bottom_graph_node_pkey", "segment_id = graph.get_segment_id_from_name(segment_name) else: segment_id = default_segment_id node_type = graph2.NodeType(graph_node_type)", "def create_get_tile_name(conn): cur = conn.cursor() @functools.lru_cache(maxsize=None) def get_tile_name(tile_pkey): cur.execute( \"\"\"", "hrow_tile, m.group(1) ) ) else: assert False, (rebuf_tile, rebuf_wire_name) for", "{ \"above\": maybe_get_clk_hrow(i - 1), \"below\": maybe_get_clk_hrow(i + 1), }", "hclk_x hclk_cmt_y = hclk_y while hclk_cmt_x > 0: hclk_cmt_x -=", "node_pkey, rebuf_tile, rebuf_wire_name in cur: if node_pkey not in REBUF_NODES:", "IOI_OLOGIC1 dst_value = '0' if is_bottom_sing else '1' unchanged_feature =", "gridloc = graph.loc_map[(node.loc.x_low, node.loc.y_low)] pin_name = graph.pin_ptc_to_name_map[ (gridloc.block_type_id, node.loc.ptc)] #", "pin_name ) if pin['port_type'] == 'input': graph.add_edge( src_node=node_mapping[track_node], sink_node=pin_node[0][0], switch_id=delayless_switch,", "parser.add_argument( '--vpr_capnp_schema_dir', help='Directory container VPR schema files', ) print('{} Starting", "= re.compile('CLK_HROW_R_CK_GCLK[0-9]+') CCIO_ACTIVE_REGEX = re.compile('HCLK_CMT_CCIO[0-9]+') HCLK_OUT = re.compile('CLK_HROW_CK_HCLK_OUT_([LR])([0-9]+)') IOI_OCLK =", "= {} grid = db.grid() _, x_max, _, _ =", "node specified. Rough structure: Add rr_nodes for CHANX and CHANY", "graph, node_idx, node.loc.x_low, node.loc.y_low, box_id=graph.maybe_add_connection_box('IMUX'), site_pin_delay=0., ) continue m =", "if idx % 1024 == 0: bar.update(idx) def create_channels(conn): cur", "phy_tile.name, wire_in_tile.name FROM wire INNER JOIN phy_tile ON phy_tile.pkey =", "graph, graph_node_pkey, node_idx, connection_box_map ): \"\"\" Update connection box of", "'inpad' and num_inpad > 1: pin_name = graph.create_pin_name_from_tile_type_sub_tile_num_and_pin( tile_type, pin['z_loc'],", "= get_pip_wire_names(pip_pkey) if not backward: pip_name = '{}.{}.{}'.format(tile_name, dest_net, src_net)", "\"above\" and \"below\" mean the opposite... rebuf_to_hrow_map = {} for", "of connection boxes by merging some. 
Examples: >>> reduce_connection_box('IMUX0') 'IMUX'", "cur = conn.cursor() for name, internal_capacitance, drive_resistance, intrinsic_delay, penalty_cost, \\", "pin_name) node_mapping[bottom_graph_node_pkey] = node.id update_connection_box( conn, graph, bottom_graph_node_pkey, node_idx, connection_box_map", "database.'.format(now())) with progressbar_utils.ProgressBar(max_value=num_edges) as bar: for idx, (src_graph_node, dest_graph_node, switch_pkey,", "SELECT pkey FROM wire_in_tile WHERE name LIKE \"CLK_BUFG_REBUF_R_CK_GCLK%_BOT\" OR name", "CLK_HROW_TOP_R, CLK_HROW_BOT_T and REBUF tiles. returns them in a list", "SELECT pkey FROM site_pin WHERE site_type_pkey = ? AND name", "handled below. if pin_name.startswith('SYN-'): set_connection_box( graph, node_idx, node.loc.x_low, node.loc.y_low, box_id=graph.maybe_add_connection_box('IMUX'),", "+ 1): coord = tuple(hilbert_curve.coordinates_from_distance(h)) if coord not in point_map:", "args.graph_limit.split(',')) roi = Roi( db=db, x1=x_min, y1=y_min, x2=x_max, y2=y_max, )", "\"\"\" Assign connection box ids for all connection box types.", "gnd_loc = (grid_loc.x, grid_loc.y) if vcc_block_id == grid_loc.block_type_id: assert vcc_loc", "= {} global REBUF_SOURCES REBUF_SOURCES = {} rebuf_wire_regexp = re.compile(", "in synth_tile['pins']: if pin['port_type'] in ['input', 'output']: wire_pkey = get_wire_pkey(conn,", "pkey FROM wire_in_tile WHERE name = ? AND phy_tile_type_pkey IN", "wire_in_tile.name FROM wire INNER JOIN phy_tile ON phy_tile.pkey = wire.phy_tile_pkey", "cur = conn.cursor() cur.execute( \"\"\" SELECT chan_width_max, x_min, x_max, y_min,", ") break hclk_cmt_x = hclk_x while hclk_cmt_x < x_max: hclk_cmt_x", "REBUF_NODES[node_pkey].append( \"{}.CLK_HROW_R_CK_GCLK{}_ACTIVE\".format( hrow_tile, m.group(1) ) ) elif m.group(2) == 'BOT':", "1024 == 0: bar.update(idx) def phy_grid_dims(conn): \"\"\" Returns physical grid", "): \"\"\" Update connection box of IPIN node if needed.", "= 'IMUX' if box.endswith('_L'): box = box.replace('_L', '') return box", "f: pickle.dump(node_mapping, f) print('{} Done writing node map.'.format(now())) if __name__", "y_max = cur.fetchone()[0] return x_max + 1, y_max + 1", "above and below CLK_HROW tile. Note that in # VPR", "PIP's feature to \"DIFF_OUT\" if feature_path[2] == \"IOB_DIFFO_OUT0\" and \\", "Importing graph nodes'.format(now())) import_graph_nodes(conn, graph, node_mapping, connection_box_map) # Walk all", "dimensions in x_min,y_min,x_max,y_max', ) parser.add_argument( '--vpr_capnp_schema_dir', help='Directory container VPR schema", "[None] + rebuf_and_hrow_tiles + [None] def maybe_get_clk_hrow(i): \"\"\" Returns a", "# Set of (src, sink, switch_id) tuples that pip edges", "INNER JOIN phy_tile ON phy_tile.pkey = wire.phy_tile_pkey WHERE wire.wire_in_tile_pkey IN", "LIKE \"CLK_HROW_BOT_R_%\" OR name LIKE \"CLK_HROW_TOP_R_%\" OR name LIKE \"CLK_BUFG_REBUF_%\"", "(SELECT wire_in_tile_pkey FROM rebuf_wires) ORDER BY rebuf_nodes.node_pkey;\"\"\" ) for node_pkey,", "inverter on CE input which is connected to VCC. #", "= ( SELECT node_pkey FROM wire WHERE pkey = ?", "not backward: pip_name = '{}.{}.{}'.format(tile_name, dest_net, src_net) else: pip_name =", "required=True ) parser.add_argument( '--synth_tiles', help='If using an ROI, synthetic tile", "k in node_mapping: node_mapping[k] = node_remap(node_mapping[k]) print('{} Writing node map.'.format(now()))", "touching rebuf wires. 
cur.execute( \"\"\" WITH rebuf_wires(wire_in_tile_pkey) AS ( SELECT", "is not None: wire_in_tile_pkey = get_site_as_tile_wire(site_as_tile_pkey, pin) else: cur.execute( \"\"\"", "edges'.format(now())) add_synthetic_edges(conn, graph, node_mapping, grid, synth_tiles) print('{} Creating channels.'.format(now())) channels_obj", "node.id update_connection_box( conn, graph, top_graph_node_pkey, node_idx, connection_box_map ) elif side", "are now routed through, without the need of placing them,", "wire = 'VCC' elif pin['port_type'] == 'GND': tile_type = 'SYN-GND'", "in (graph2.NodeType.IPIN, graph2.NodeType.OPIN): continue gridloc = graph.loc_map[(node.loc.x_low, node.loc.y_low)] pin_name =", "connection boxes by merging some. Examples: >>> reduce_connection_box('IMUX0') 'IMUX' >>>", "elif side == tracks.Direction.RIGHT: assert right_graph_node_pkey is not None, (tile_type,", "edge.sink_node, edge.switch_id, None) # Then yield edges from database. cur", "= 1 else: assert False, \"Impossible to determine X location", "them. print('{} Creating tracks'.format(now())) segment_id = graph.get_segment_id_from_name('dummy') create_track_rr_graph( conn, graph,", "edge feature to accomodate this # need, as the IOI_SING", "node_idx, connection_box_map ): \"\"\" Update connection box of IPIN node", "pin_node = graph.get_nodes_for_pin( tuple(synth_tile['loc']), pin_name ) if pin['port_type'] == 'input':", "graph.create_pin_name_from_tile_type_sub_tile_num_and_pin( tile_type, (pin['z_loc'] - num_inpad), wire ) else: pin_name =", "REBUF stuff rebuf_key = (feature_path[0], feature_path[1]) if rebuf_key in REBUF_SOURCES:", "= re.compile( 'CLK_BUFG_REBUF_R_CK_GCLK([0-9]+)_(BOT|TOP)' ) cur = conn.cursor() # Find CLK_HROW_TOP_R,", "== 'inpad' and num_inpad > 1: pin_name = graph.create_pin_name_from_tile_type_sub_tile_num_and_pin( tile_type,", "HCLK_CMT_TILES[tile, 'R'] = grid.tilename_at_loc( (hclk_cmt_x, hclk_cmt_y) ) break def find_hclk_cmt_hclk_feature(hclk_tile,", ") = cur.fetchone() else: assert False, pin['port_type'] tracks_model, track_nodes =", "the destination wire of the pip # # Example: IOI_OLOGIC0", "\"\"\"): if src_graph_node not in node_mapping: continue if dest_graph_node not", "(switch_pkey, ) ) (switch_name, ) = cur.fetchone() switch_id = graph.get_switch_id(switch_name)", "synth_tiles, segment_id ): cur = conn.cursor() cur.execute(\"\"\"SELECT count(*) FROM track;\"\"\")", "dest_net) return get_pip_wire_names def get_number_graph_edges(conn, graph, node_mapping): num_edges = len(graph.edges)", "hclk_cmt_y)) if gridinfo.tile_type == 'HCLK_CMT_L': HCLK_CMT_TILES[tile, 'R'] = grid.tilename_at_loc( (hclk_cmt_x,", "m.group(1)) ) hrow_tile = rebuf_to_hrow_map[rebuf_tile][\"above\"] if hrow_tile is not None:", "hclk_cmt_tile = HCLK_CMT_TILES[(hclk_tile, lr)] return ['{}.HCLK_CMT_CK_BUFHCLK{}_USED'.format(hclk_cmt_tile, hclk_number)] def check_feature(feature): \"\"\"", "def main(): parser = argparse.ArgumentParser() parser.add_argument( '--db_root', required=True, help='Project X-Ray", "the edge feature to accomodate this # need, as the", "False, node_type canonical_loc = None cur2.execute( \"\"\" SELECT grid_x, grid_y", "value to attach to the IOI_SITE_PIPS names # in the", "import graph2 from lib.rr_graph import tracks from lib.connection_database import get_wire_pkey,", "VPR cannot handle duplicate paths with the same switch id.", "JOIN site ON site.pkey = site_as_tile.site_pkey WHERE site_as_tile.pkey = ?\"\"\",", "are used in differential input mode. 
# # Vivado does", "= find_constant_network(graph) with sqlite3.connect(\"file:{}?mode=ro\".format(args.connection_database), uri=True) as conn: populate_bufg_rebuf_map(conn) cur =", "not None, pin_name tile_type = m.group(1) tile_type = remove_vpr_tile_prefix(tile_type) pin", "m.group(2) == 'TOP': REBUF_NODES[node_pkey].append( '{}.GCLK{}_ENABLE_BELOW'.format(rebuf_tile, m.group(1)) ) hrow_tile = rebuf_to_hrow_map[rebuf_tile][\"below\"]", "rr nodes with graph_node's in the connection_database. print('{} Importing graph", "= '{}.ENABLE_BUFFER.{}'.format( feature_path[0], feature_path[-1] ) return ' '.join((feature, enable_buffer_feature)) #", "site_pin_delay ) def create_get_tile_and_site_as_tile_pkey(cur): tiles = {} for tile_pkey, site_as_tile_pkey,", "in node_mapping: continue if dest_graph_node not in node_mapping: continue num_edges", "in cur.execute(\"\"\" SELECT src_graph_node_pkey, dest_graph_node_pkey FROM graph_edge; \"\"\"): if src_graph_node", "= ? )\"\"\", (connection_box_wire_pkey, ) ) grid_x, grid_y = cur.fetchone()", ") = cur.fetchone() assert track_pkey is not None, ( tile_name,", "if m: enable_oclkm_feature = '{}.IOI_OCLKM_{}.{}'.format( feature_path[0], m.group(1), feature_path[-1] ) return", "node.loc.side if side == tracks.Direction.LEFT: assert left_graph_node_pkey is not None,", "= result side = node.loc.side if side == tracks.Direction.LEFT: assert", "conn, graph, bottom_graph_node_pkey, node_idx, connection_box_map ) else: assert False, side", "channel XML node from connection database and serialize output to", "pin['port_type'] == 'GND': tile_type = 'SYN-GND' wire = 'GND' else:", "there on the list. \"\"\" tile = rebuf_and_hrow_tiles[i] if tile", "= default_segment_id node_type = graph2.NodeType(graph_node_type) if node_type == graph2.NodeType.CHANX: direction", "WHERE alive = 1;\"): alive_tracks.add(track_pkey) print('{} Importing alive tracks'.format(now())) import_tracks(conn,", "count() FROM graph_edge;\" \"\") (num_edges, ) = cur.fetchone() get_tile_name =", "FROM wire WHERE pkey = ? );\"\"\", (wire_pkey, ) )", "'.join((feature, enable_feature)) if BUFG_CLK_OUT_REGEX.fullmatch(feature_path[-1]): enable_feature = '{}.{}_ACTIVE'.format( feature_path[0], feature_path[-1] )", "\"{}{}{}{}\".format( m.group(1), m.group(2), m.group(3), m.group(4) ) src_wire = m.group(6).replace('_SING', '')", "switch_id=delayless_switch, name='synth_{}_{}'.format(tile_name, pin['wire']), ) elif pin['port_type'] in ['VCC', 'GND', 'output']:", "= cur.fetchall() assert len(results) == 1 wire_in_tile_pkey = results[0][0] return", "node_pkey not in REBUF_NODES: REBUF_NODES[node_pkey] = [] m = rebuf_wire_regexp.fullmatch(rebuf_wire_name)", "wire.wire_in_tile_pkey IN (SELECT wire_in_tile_pkey FROM rebuf_wires) ORDER BY rebuf_nodes.node_pkey;\"\"\" )", "are hard wires that connect IOB33M and IOB33S sites. #", "cur.fetchone() get_tile_name = create_get_tile_name(conn) get_pip_wire_names = create_get_pip_wire_names(conn) switch_name_map = {}", "= create_get_pip_wire_names(conn) switch_name_map = {} print('{} Importing edges from database.'.format(now()))", "% 1024 == 0: bar.update(idx) def create_channels(conn): cur = conn.cursor()", "boxes by merging some. Examples: >>> reduce_connection_box('IMUX0') 'IMUX' >>> reduce_connection_box('IMUX1')", "IOI TILE. 
# # The following, is to change the", "= graph.create_pin_name_from_tile_type_sub_tile_num_and_pin( tile_type, (pin['z_loc'] - num_inpad), wire ) else: pin_name", "wire_in_tile_pkey = results[0][0] tile_pkey, _ = get_tile_and_site_as_tile_pkey(gridloc[0], gridloc[1]) cur.execute( \"\"\"", "1 else: assert False, \"Impossible to determine X location of", "y_max = cur.fetchone() cur.execute('SELECT idx, info FROM x_list;') x_list =", "Starting routing import'.format(now())) args = parser.parse_args() db = prjxray.db.Database(args.db_root, args.part)", "site pins rr nodes with graph_node's in the connection_database. print('{}", "= grid.gridinfo_at_loc((hclk_cmt_x, hclk_cmt_y)) if gridinfo.tile_type == 'HCLK_CMT': HCLK_CMT_TILES[tile, 'L'] =", "wire ) else: pin_name = graph.create_pin_name_from_tile_type_and_pin( tile_type, wire ) pin_node", "reduce_connection_box('IMUX0') 'IMUX' >>> reduce_connection_box('IMUX1') 'IMUX' >>> reduce_connection_box('IMUX10') 'IMUX' >>> reduce_connection_box('BRAM_ADDR')", "(hclk_cmt_x, hclk_cmt_y) ) break hclk_cmt_x = hclk_x while hclk_cmt_x <", "num_outpad = len( list( filter( lambda t: t['port_type'] == 'input',", "whole # IOI/IOB column. The Y coordinate identified with the", "of a normal IOI tile, while the BOTTOM IOI_SING #", "SELECT pkey, track_pkey, graph_node_type, x_low, x_high, y_low, y_high, ptc, capacitance,", "tiles and create synth_tiles input. All arches should have these", "group is dived by 50 to get the relative #", "BY rebuf_nodes.node_pkey;\"\"\" ) for node_pkey, rebuf_tile, rebuf_wire_name in cur: if", "also connects the synthetic IO tiles to the routing node", "continue cur2.execute( \"\"\" SELECT name FROM segment WHERE pkey =", "FROM pip_in_tile WHERE pkey = ?;\"\"\", (pip_pkey, ) ) src_wire_in_tile_pkey,", "pip names # despite they are found on the TOP", "PIP in the prjxray db but there # is a", "enable_feature)) if BUFG_CLK_OUT_REGEX.fullmatch(feature_path[-1]): enable_feature = '{}.{}_ACTIVE'.format( feature_path[0], feature_path[-1] ) return", "? );\"\"\", (wire_pkey, ) ) (track_pkey, ) = cur.fetchone() assert", "feature_path[2] == \"IOB_PADOUT0\" and feature_path[1] == \"IOB_DIFFI_IN1\": return '' if", "node_mapping[left_graph_node_pkey] = node.id update_connection_box( conn, graph, left_graph_node_pkey, node_idx, connection_box_map )", "fabric to the rr graph. For ROI configurations, this also", "return ' '.join((feature, enable_oclkm_feature)) if HCLK_CK_BUFHCLK_REGEX.fullmatch(feature_path[-1]): enable_buffer_feature = '{}.ENABLE_BUFFER.{}'.format( feature_path[0],", "existing edges print('{} Importing existing edges.'.format(now())) for edge in graph.edges:", "traversed, the correct fasm feature needs to be added. #", "y_list = [] for idx, info in cur: y_list.append(graph2.ChannelList(idx, info))", "if track_pkey not in alive_tracks: continue cur2.execute( \"\"\" SELECT name", "== \"IOB_PADOUT1\" and feature_path[1] == \"IOB_DIFFI_IN0\": return '' # REBUF", "phy_tile.pkey = wire.phy_tile_pkey INNER JOIN wire_in_tile ON wire_in_tile.pkey = wire.wire_in_tile_pkey", "pip in src_wire: src_wire = src_wire.replace('_0', '_{}'.format(src_value)) if 'IOI_OCLK' in", "the input rr_graph. 
Create a mapping between database graph_nodes and", "cur.execute(\"SELECT grid_y FROM phy_tile ORDER BY grid_y DESC LIMIT 1;\")", "= graph2.CanonicalLoc(x=result[0], y=result[1]) track = tracks.Track( direction=direction, x_low=x_low, x_high=x_high, y_low=y_low,", "REBUF_SOURCES = {} rebuf_wire_regexp = re.compile( 'CLK_BUFG_REBUF_R_CK_GCLK([0-9]+)_(BOT|TOP)' ) cur =", "Check if enabling this feature requires other features to be", "relevant features are: # - IN_USE: to enable the BUFHCE", "}, ], }, \"GND\": { 'loc': gnd_loc, 'pins': [ {", "m = PIN_NAME_TO_PARTS.match(pin_name) assert m is not None, pin_name tile_type", "gridinfo.tile_type == 'HCLK_CMT': HCLK_CMT_TILES[tile, 'L'] = grid.tilename_at_loc( (hclk_cmt_x, hclk_cmt_y) )", "help='Directory container VPR schema files', ) print('{} Starting routing import'.format(now()))", "== 'VCC': tile_type = 'SYN-VCC' wire = 'VCC' elif pin['port_type']", "= '{}.BUFHCE.{}.ZINV_CE=1\\'b1'.format( feature_path[0], bufhce_loc ) return ' '.join((feature, enable_bufhce_in_use, enable_bufhce_zinv_ce))", "in graph.block_types: block_types[block_type.name] = block_type.id assert 'SYN-GND' in block_types assert", "track_pkey) option = list( tracks_model.get_tracks_for_wire_at_coord( tuple(synth_tile['loc']) ).values() ) assert len(option)", "x_loc = 0 elif 'R' in x_loc_str: x_loc = 1", "), rebuf_nodes(node_pkey) AS ( SELECT DISTINCT node_pkey FROM wire WHERE", "enable_cascout)) parts = feature.split('.') wire_feature = feature_when_routed(parts[1]) if wire_feature is", "cur.fetchone() cur.execute( \"SELECT wire_in_tile_pkey FROM wire WHERE pkey = ?\",", ") ) ) )\"\"\", (graph_node_pkey, ) ) site_pin_delay = cur.fetchone()[0]", "mapping between database graph_nodes and IPIN, OPIN, CHANX and CHANY", "'L' in x_loc_str: x_loc = 0 elif 'R' in x_loc_str:", "else: segment_id = default_segment_id node_type = graph2.NodeType(graph_node_type) if node_type ==", "'input', synth_tile['pins'] ) ) ) for pin in synth_tile['pins']: if", "(rebuf_tile, rebuf_wire_name) for node_pkey in REBUF_NODES: cur.execute( \"\"\" SELECT phy_tile.name,", "= len(graph.edges) print('{} Counting edges.'.format(now())) cur = conn.cursor() cur.execute(\"SELECT count()", "\"\"\" import argparse import os.path from hilbertcurve.hilbertcurve import HilbertCurve import", "\"CLK_BUFG_REBUF_R_CK_GCLK%_BOT\" OR name LIKE \"CLK_BUFG_REBUF_R_CK_GCLK%_TOP\" ), rebuf_nodes(node_pkey) AS ( SELECT", "rebuf_to_hrow_map[tile_name] = { \"above\": maybe_get_clk_hrow(i - 1), \"below\": maybe_get_clk_hrow(i +", "alive_tracks = set() for (track_pkey, ) in cur.execute(\"SELECT pkey FROM", "of graph_node.pkey to rr node id. node_mapping = {} print('{}", "results[0][0] tile_pkey, _ = get_tile_and_site_as_tile_pkey(gridloc[0], gridloc[1]) cur.execute( \"\"\" SELECT top_graph_node_pkey,", "m.group(1) ) return ' '.join((feature, enable_cascout)) parts = feature.split('.') wire_feature", "= ?\"\"\", (graph_node_pkey, ) ) connection_box_wire_pkey = cur.fetchone()[0] if connection_box_wire_pkey", "1 gridinfo = grid.gridinfo_at_loc((hclk_cmt_x, hclk_cmt_y)) if gridinfo.tile_type == 'HCLK_CMT': HCLK_CMT_TILES[tile,", "up in the hierarchy (its tile-wide, not site-wide). So here", "if pip_pkey is not None: tile_name = get_tile_name(phy_tile_pkey) src_net, dest_net", "to the routing node specified. 
Rough structure: Add rr_nodes for", "rebuf_to_hrow_map = {} for i, tile_name in enumerate(rebuf_and_hrow_tiles): if tile_name", "if idx % 1024 == 0: bar.update(idx) def phy_grid_dims(conn): \"\"\"", "\"{}{}\".format(dst_value, src_wire) feature = \"{}{}\".format(unchanged_feature, changed_feature) feature_path = feature.split('.') #", "wire_feature is not None: return '{} {}.{}'.format(feature, parts[0], wire_feature) return", "(wire_pkey, ) ) (track_pkey, ) = cur.fetchone() assert track_pkey is", "name='synth_{}_{}'.format(tile_name, pin['wire']), ) elif pin['port_type'] in ['VCC', 'GND', 'output']: graph.add_edge(", "'{}.ENABLE_BUFFER.{}'.format( feature_path[0], feature_path[-1] ) return ' '.join((feature, enable_buffer_feature)) # BUFHCE", "constant_sources') (track_pkey, ) = cur.fetchone() else: assert False, pin['port_type'] tracks_model,", "'OCLK_', 'OCLKM_'] IOI_SING_REGEX = re.compile( r'([RL]IOI3_SING_X[0-9]+Y)([0-9]+)(\\.IOI_)({})([01])(.*)'.format( \"|\".join(IOI_SITE_PIPS) ) ) def", "of 50 IOBs. # The IOI_SING are found on top", "wire_in_tile WHERE pkey = ( SELECT wire_in_tile_pkey FROM wire WHERE", "( SELECT node_pkey FROM graph_node WHERE pkey = ? )", "(src_node, sink_node, switch_id, ()) else: yield (src_node, sink_node, switch_id, ())", "site_pkey = get_site_from_site_as_tile( site_as_tile_pkey ) cur.execute( \"\"\" SELECT pkey FROM", "connection_box_map[wire_in_tile_pkey] = graph.maybe_add_connection_box( reduce_connection_box(wire_name) ) return connection_box_map def yield_nodes(nodes): with", "return switch_id def create_get_tile_name(conn): cur = conn.cursor() @functools.lru_cache(maxsize=None) def get_tile_name(tile_pkey):", "graph) # Match site pins rr nodes with graph_node's in", "graph.get_nodes_for_pin( tuple(synth_tile['loc']), pin_name ) if pin['port_type'] == 'input': graph.add_edge( src_node=node_mapping[track_node],", "src_node=pin_node[0][0], sink_node=node_mapping[track_node], switch_id=delayless_switch, name='synth_{}_{}'.format(tile_name, pin['wire']), ) else: assert False, pin", "prjxray.grid as grid from lib.rr_graph import graph2 from lib.rr_graph import", "if tile_name is not None and tile_name.startswith(\"CLK_BUFG_REBUF\"): rebuf_to_hrow_map[tile_name] = {", "= graph2.Node(**node_dict) def update_connection_box( conn, graph, graph_node_pkey, node_idx, connection_box_map ):", "nodes'.format(now())) import_graph_nodes(conn, graph, node_mapping, connection_box_map) # Walk all track graph", "global REBUF_NODES REBUF_NODES = {} global REBUF_SOURCES REBUF_SOURCES = {}", "wire.wire_in_tile_pkey WHERE wire.node_pkey = ?;\"\"\", (node_pkey, ) ) for tile,", "\"\"\" cur.execute( \"\"\" SELECT name FROM phy_tile WHERE name LIKE", "have the same wire, and pip names # despite they", "wire_in_tile_pkey = get_site_as_tile_wire(site_as_tile_pkey, pin) else: cur.execute( \"\"\" SELECT pkey FROM", "help='Output map of graph_node_pkey to rr inode file' ) parser.add_argument(", "m = IOI_OCLK.fullmatch(feature_path[1]) if m: enable_oclkm_feature = '{}.IOI_OCLKM_{}.{}'.format( feature_path[0], m.group(1),", "grid_y DESC; \"\"\" ) return [t[0] for t in cur.fetchall()]", "1 wire_in_tile_pkey = results[0][0] tile_pkey, _ = get_tile_and_site_as_tile_pkey(gridloc[0], gridloc[1]) cur.execute(", "capnp_graph.graph if synth_tiles is None: synth_tiles = find_constant_network(graph) with sqlite3.connect(\"file:{}?mode=ro\".format(args.connection_database),", "if not backward: pip_name = '{}.{}.{}'.format(tile_name, dest_net, src_net) else: pip_name", "# When this PIP is active the 
IOB operates in", ") assert len(option) > 0, (pin, len(option)) if pin['port_type'] ==", "\"\"\" ) return [t[0] for t in cur.fetchall()] def populate_bufg_rebuf_map(conn):", "which were unused in arch xml, and so # were", "capacitance, resistance FROM graph_node WHERE track_pkey IS NOT NULL;\"\"\")): if", "SELECT node_pkey FROM wire WHERE pkey = ? );\"\"\", (wire_pkey,", "top half of a normal IOI TILE. # # The", "grid, synth_tiles): cur = conn.cursor() delayless_switch = graph.get_switch_id('__vpr_delayless_switch__') for tile_name,", "pkey IN ( SELECT phy_tile_pkey FROM tile_map WHERE tile_pkey =", "find_hclk_cmt_hclk_feature( feature_path[0], m.group(1), m.group(2) ) ) m = CASCOUT_REGEX.fullmatch(feature_path[-2]) if", "node_mapping[top_graph_node_pkey] = node.id update_connection_box( conn, graph, top_graph_node_pkey, node_idx, connection_box_map )", "in node_mapping node_mapping[graph_node_pkey] = graph.add_track( track=track, segment_id=segment_id, ptc=ptc, timing=graph2.NodeTiming( r=resistance,", "= grid.loc_of_tilename(tile) hclk_cmt_x = hclk_x hclk_cmt_y = hclk_y while hclk_cmt_x", "pin['port_type'] == 'VCC': cur.execute('SELECT vcc_track_pkey FROM constant_sources') (track_pkey, ) =", "cur: REBUF_SOURCES[(tile, wire_name)] = node_pkey HCLK_CMT_TILES = {} def populate_hclk_cmt_tiles(db):", "switch_pkey ) src_node = node_mapping[src_graph_node] sink_node = node_mapping[dest_graph_node] if pip_name", "y_max + 1 def find_constant_network(graph): \"\"\" Find VCC and GND", "the graph_edge table. Import channel XML node from connection database", "'{}.BUFHCE.{}.ZINV_CE=1\\'b1'.format( feature_path[0], bufhce_loc ) return ' '.join((feature, enable_bufhce_in_use, enable_bufhce_zinv_ce)) if", ") ) num_outpad = len( list( filter( lambda t: t['port_type']", "lr) not in HCLK_CMT_TILES: return [] hclk_cmt_tile = HCLK_CMT_TILES[(hclk_tile, lr)]", "that the TOP IOI_SING tile shares bits with # the", "'output']: wire_pkey = get_wire_pkey(conn, tile_name, pin['wire']) cur.execute( \"\"\" SELECT track_pkey", "[] for idx, info in cur: y_list.append(graph2.ChannelList(idx, info)) return graph2.Channels(", "graph.grid: if gnd_block_id == grid_loc.block_type_id: assert gnd_loc is None gnd_loc", "not None assert vcc_loc is not None synth_tiles = {", "? ) ) ) )\"\"\", (graph_node_pkey, ) ) site_pin_delay =", "graph, bottom_graph_node_pkey, node_idx, connection_box_map ) else: assert False, side def", "y = node.loc.y_low if (x, y) not in point_map: point_map[(x,", "== 'TOP': REBUF_NODES[node_pkey].append( '{}.GCLK{}_ENABLE_BELOW'.format(rebuf_tile, m.group(1)) ) hrow_tile = rebuf_to_hrow_map[rebuf_tile][\"below\"] if", "is traversed, the correct fasm feature needs to be added.", "dst_value = '0' if is_bottom_sing else '1' unchanged_feature = \"{}{}{}{}\".format(", "x_list=x_list, y_list=y_list, ) def create_connection_boxes(conn, graph): \"\"\" Assign connection box", "tiles, search the input rr graph for the SYN-GND and", "canonical_loc = graph2.CanonicalLoc(x=result[0], y=result[1]) track = tracks.Track( direction=direction, x_low=x_low, x_high=x_high,", "for old_id in point_map[coord]: id_map[old_id] = idx idx += 1", "name = ? 
AND phy_tile_type_pkey IN ( SELECT tile_type_pkey FROM", ") for tile, wire_name in cur: REBUF_SOURCES[(tile, wire_name)] = node_pkey", "= {} print('{} Creating connection box list'.format(now())) connection_box_map = create_connection_boxes(conn,", "Create a mapping between database graph_nodes and IPIN, OPIN, CHANX", "grid_y, box_id, site_pin_delay ): \"\"\" Assign a connection box to", "name FROM switch WHERE pkey = ?;\"\"\", (switch_pkey, ) )", "node_mapping[right_graph_node_pkey] = node.id update_connection_box( conn, graph, right_graph_node_pkey, node_idx, connection_box_map )", "FROM phy_tile WHERE pkey = ( SELECT canon_phy_tile_pkey FROM track", "VPR. VPR cannot handle duplicate paths with the same switch", "routing node specified. Rough structure: Add rr_nodes for CHANX and", "list to simplify the code below. rebuf_and_hrow_tiles = [None] +", "pin['port_type'] == 'VCC': tile_type = 'SYN-VCC' wire = 'VCC' elif", "fasm # features here. if feature_path[2] == \"IOB_PADOUT0\" and feature_path[1]", "for pin in synth_tile['pins']: if pin['port_type'] in ['input', 'output']: wire_pkey", "(src_node, sink_node, switch_id, ()) if idx % 1024 == 0:", "'_{}'.format(src_value)) if 'IOI_OCLK' in src_wire: src_wire = src_wire.replace('_0', '_{}'.format(dst_value)) changed_feature", "import_graph_nodes(conn, graph, node_mapping, connection_box_map): cur = conn.cursor() get_tile_and_site_as_tile_pkey = create_get_tile_and_site_as_tile_pkey(cur)", "tile in grid.tiles(): gridinfo = grid.gridinfo_at_tilename(tile) if gridinfo.tile_type not in", "WHERE tile_pkey = ? ) );\"\"\", (pin, tile_pkey) ) results", "penalty_cost, \\ switch_type in cur.execute(\"\"\" SELECT name, internal_capacitance, drive_resistance, intrinsic_delay,", "+ [None] def maybe_get_clk_hrow(i): \"\"\" Returns a name of CLK_HROW", "of IPIN node if needed. \"\"\" cur = conn.cursor() cur.execute(", "= conn.cursor() for name, internal_capacitance, drive_resistance, intrinsic_delay, penalty_cost, \\ switch_type", "of graph_node_pkey to rr inode file' ) parser.add_argument( '--connection_database', help='Database", "if m.group(2) == 'TOP': REBUF_NODES[node_pkey].append( '{}.GCLK{}_ENABLE_BELOW'.format(rebuf_tile, m.group(1)) ) hrow_tile =", "}, } } return synth_tiles def create_node_remap(nodes, channels_obj): N =", "WHERE pkey = ( SELECT segment_pkey FROM track WHERE pkey", "channel;\"\"\" ) chan_width_max, x_min, x_max, y_min, y_max = cur.fetchone() cur.execute('SELECT", "duplicate paths with the same switch id. print('{} Adding synthetic", ") ) ) for pin in synth_tile['pins']: if pin['port_type'] in", "cur = conn.cursor() cur.execute(\"SELECT count() FROM graph_edge;\" \"\") for src_graph_node,", ") elif side == tracks.Direction.BOTTOM: assert bottom_graph_node_pkey is not None,", "info FROM y_list;') y_list = [] for idx, info in", "synth_tile['tile_name'] wire = 'inpad' elif pin['port_type'] == 'VCC': tile_type =", "FROM constant_sources') (track_pkey, ) = cur.fetchone() else: assert False, pin['port_type']", "WHERE pkey = ? )\"\"\", (connection_box_wire_pkey, ) ) grid_x, grid_y", "name LIKE \"CLK_BUFG_REBUF_%\" ORDER BY grid_y DESC; \"\"\" ) return", "{} for block_type in graph.block_types: block_types[block_type.name] = block_type.id assert 'SYN-GND'", "( src_node, sink_node, switch_id, (('fasm_features', feature), ) ) else: yield", "SELECT site.site_type_pkey, site_as_tile.site_pkey FROM site_as_tile INNER JOIN site ON site.pkey", "Find VCC and GND tiles and create synth_tiles input. All", "named \"DIFF_OUT\". 
# # The \"DIFF_OUT\" cannot be set in", "== 'input': graph.add_edge( src_node=node_mapping[track_node], sink_node=pin_node[0][0], switch_id=delayless_switch, name='synth_{}_{}'.format(tile_name, pin['wire']), ) elif", "node in enumerate(nodes): yield node if idx % 1024 ==", "enable_bufhce_in_use = '{}.BUFHCE.{}.IN_USE'.format( feature_path[0], bufhce_loc ) enable_bufhce_zinv_ce = '{}.BUFHCE.{}.ZINV_CE=1\\'b1'.format( feature_path[0],", "= None capnp_graph = capnp_graph2.Graph( rr_graph_schema_fname=os.path.join( args.vpr_capnp_schema_dir, 'rr_graph_uxsdcxx.capnp' ), input_file_name=args.read_rr_graph,", "# First yield existing edges print('{} Importing existing edges.'.format(now())) for", "feature_path = feature.split('.') # IOB_DIFFO_OUT0->IOB_DIFFO_IN1 # # When this PIP", "vcc_block_id = block_types['SYN-VCC'] gnd_loc = None vcc_loc = None for", "feature_when_routed from prjxray_tile_import import remove_vpr_tile_prefix import simplejson as json from", "the connection_database. print('{} Importing graph nodes'.format(now())) import_graph_nodes(conn, graph, node_mapping, connection_box_map)", "grid.loc_of_tilename(tile) hclk_cmt_x = hclk_x hclk_cmt_y = hclk_y while hclk_cmt_x >", "found on top or bottom of the whole # IOI/IOB", "progressbar_utils.progressbar(cur.execute(\"\"\" SELECT pkey, track_pkey, graph_node_type, x_low, x_high, y_low, y_high, ptc,", ") ) m = CASCOUT_REGEX.fullmatch(feature_path[-2]) if m: enable_cascout = '{}.CASCOUT_{}_ACTIVE'.format(", "graph.create_pin_name_from_tile_type_sub_tile_num_and_pin( tile_type, pin['z_loc'], wire ) elif wire == 'outpad' and", "that pip edges have been sent to # VPR. VPR", "intrinsic_delay, penalty_cost, \\ switch_type in cur.execute(\"\"\" SELECT name, internal_capacitance, drive_resistance,", "for (graph_node_pkey, track_pkey, graph_node_type, x_low, x_high, y_low, y_high, ptc, capacitance,", "node_mapping = {} print('{} Creating connection box list'.format(now())) connection_box_map =", "switch;\"\"\"): # Add back missing switchs, which were unused in", "to attach to the source pip name that # changes", "m.group(2) ) ) m = CASCOUT_REGEX.fullmatch(feature_path[-2]) if m: enable_cascout =", "FROM node WHERE pkey = ( SELECT node_pkey FROM graph_node", "( tile_name, pin['wire'], wire_pkey ) elif pin['port_type'] == 'VCC': cur.execute('SELECT", "num_edges=num_edges, edges_obj=import_graph_edges(conn, graph, node_mapping), node_remap=node_remap, ) for k in node_mapping:", "The difference is that the TOP IOI_SING tile shares bits", "is None gnd_loc = (grid_loc.x, grid_loc.y) if vcc_block_id == grid_loc.block_type_id:", "y_low, y_high, ptc, capacitance, resistance FROM graph_node WHERE track_pkey IS", "feature_path[0], bufhce_loc ) return ' '.join((feature, enable_bufhce_in_use, enable_bufhce_zinv_ce)) if BUFG_CLK_IN_REGEX.fullmatch(feature_path[-1]):", "graph_node WHERE pkey = ?\"\"\", (graph_node_pkey, ) ) connection_box_wire_pkey =", "use_roi = True with open(args.synth_tiles) as f: synth_tiles = json.load(f)", "below CLK_HROW tile. Note that in # VPR coords terms.", "wire_in_tile_pkey = results[0][0] return wire_in_tile_pkey return get_site_as_tile_wire def import_graph_nodes(conn, graph,", "in the graph_edge table. 
Import channel XML node from connection", "print('{} Creating channels.'.format(now())) channels_obj = create_channels(conn) node_remap = create_node_remap(capnp_graph.graph.nodes, channels_obj)", "def get_clk_hrow_and_rebuf_tiles_sorted(cur): \"\"\" Finds all CLK_HROW_TOP_R, CLK_HROW_BOT_T and REBUF tiles.", "= create_channels(conn) node_remap = create_node_remap(capnp_graph.graph.nodes, channels_obj) x_dim, y_dim = phy_grid_dims(conn)", "= { \"above\": maybe_get_clk_hrow(i - 1), \"below\": maybe_get_clk_hrow(i + 1),", "feature: yield ( src_node, sink_node, switch_id, (('fasm_features', feature), ) )", "= node_remap(node_mapping[k]) print('{} Writing node map.'.format(now())) with open(args.write_rr_node_map, 'wb') as", "to the rr graph. For ROI configurations, this also connects", "= create_node_remap(capnp_graph.graph.nodes, channels_obj) x_dim, y_dim = phy_grid_dims(conn) connection_box_obj = graph.create_connection_box_object(", "stuff rebuf_key = (feature_path[0], feature_path[1]) if rebuf_key in REBUF_SOURCES: return", "result = cur.fetchone() assert result is not None, (wire_in_tile_pkey, tile_pkey)", "if gridinfo.tile_type not in ['CLK_HROW_BOT_R', 'CLK_HROW_TOP_R']: continue hclk_x, hclk_y =", "else: pin_name = graph.create_pin_name_from_tile_type_and_pin( tile_type, wire ) pin_node = graph.get_nodes_for_pin(", "= [] for idx, info in cur: y_list.append(graph2.ChannelList(idx, info)) return", "list sorted according to their Y coordinates. \"\"\" cur.execute( \"\"\"", "update_connection_box( conn, graph, left_graph_node_pkey, node_idx, connection_box_map ) elif side ==", "= cur.fetchone() cur.execute( \"\"\"SELECT name FROM wire_in_tile WHERE pkey =", "dest_net = get_pip_wire_names(pip_pkey) if not backward: pip_name = '{}.{}.{}'.format(tile_name, dest_net,", "rr_graph_schema_fname=os.path.join( args.vpr_capnp_schema_dir, 'rr_graph_uxsdcxx.capnp' ), input_file_name=args.read_rr_graph, progressbar=progressbar_utils.progressbar, output_file_name=args.write_rr_graph, ) graph =", "sink_node = node_mapping[dest_graph_node] if pip_name is not None: feature =", "rr_graph. Create a mapping between database graph_nodes and IPIN, OPIN,", "x1=synth_tiles['info']['GRID_X_MIN'], y1=synth_tiles['info']['GRID_Y_MIN'], x2=synth_tiles['info']['GRID_X_MAX'], y2=synth_tiles['info']['GRID_Y_MAX'], ) print('{} generating routing graph for", "in x_loc_str: x_loc = 0 elif 'R' in x_loc_str: x_loc", "[] m = rebuf_wire_regexp.fullmatch(rebuf_wire_name) if m.group(2) == 'TOP': REBUF_NODES[node_pkey].append( '{}.GCLK{}_ENABLE_BELOW'.format(rebuf_tile,", ">>> reduce_connection_box('IMUX10') 'IMUX' >>> reduce_connection_box('BRAM_ADDR') 'IMUX' >>> reduce_connection_box('A_L10') 'A' >>>", "graph_node WHERE connection_box_wire_pkey IS NOT NULL ) );\"\"\" ) connection_box_map", "return lambda x: id_map[x] def main(): parser = argparse.ArgumentParser() parser.add_argument(", "connection box types. \"\"\" cur = conn.cursor() cur.execute( \"\"\" SELECT", "(src_wire_in_tile_pkey, ) ) (src_net, ) = cur.fetchone() cur.execute( \"\"\"SELECT name", "features. 
Example: .HCLK_LEAF_CLK_B_BOTL0.HCLK_CK_BUFHCLK10 implies: .ENABLE_BUFFER.HCLK_CK_BUFHCLK10 \"\"\" # IOI_SING tiles have", "cur = conn.cursor() delayless_switch = graph.get_switch_id('__vpr_delayless_switch__') for tile_name, synth_tile in", "reduce_connection_box('B_L') 'B' \"\"\" box = CONNECTION_BOX_FILTER.match(box).group(1) if 'BRAM_ADDR' in box:", "cur.fetchall() assert len(results) == 1 wire_in_tile_pkey = results[0][0] return wire_in_tile_pkey", "a normal IOI TILE. # # The following, is to", "#!/usr/bin/env python3 \"\"\" Imports 7-series routing fabric to the rr", "Database' ) parser.add_argument('--part', required=True, help='FPGA part') parser.add_argument( '--read_rr_graph', required=True, help='Input", "table. Import channel XML node from connection database and serialize", "connection_box_map[wire_in_tile_pkey] cur.execute( \"\"\" SELECT switch.intrinsic_delay FROM switch WHERE pkey =", "hclk_y while hclk_cmt_x > 0: hclk_cmt_x -= 1 gridinfo =", "CHANX and CHANY rr_node ids in the rr_graph. Add rr_edge", "?\"\"\", (site_as_tile_pkey, ) ) results = cur.fetchall() assert len(results) ==", "node_pkey in REBUF_NODES: cur.execute( \"\"\" SELECT phy_tile.name, wire_in_tile.name FROM wire", "get_tile_name = create_get_tile_name(conn) get_pip_wire_names = create_get_pip_wire_names(conn) switch_name_map = {} print('{}", "of the pip # # Example: IOI_OLOGIC0 -> IOI_OLOGIC1 dst_value", "result is not None: segment_name = result[0] segment_id = graph.get_segment_id_from_name(segment_name)", "in cur: REBUF_SOURCES[(tile, wire_name)] = node_pkey HCLK_CMT_TILES = {} def", "resistance FROM graph_node WHERE track_pkey IS NOT NULL;\"\"\")): if track_pkey", "= ['OLOGIC', 'ILOGIC', 'IDELAY', 'OCLK_', 'OCLKM_'] IOI_SING_REGEX = re.compile( r'([RL]IOI3_SING_X[0-9]+Y)([0-9]+)(\\.IOI_)({})([01])(.*)'.format(", ") num_outpad = len( list( filter( lambda t: t['port_type'] ==", "0 elif 'R' in x_loc_str: x_loc = 1 else: assert", "'{}.{}.{}'.format(tile_name, src_net, dest_net) else: pip_name = None switch_id = get_switch_name(", "have been sent to # VPR. VPR cannot handle duplicate", "grid_x, grid_y FROM tile;\"\"\"): tiles[(grid_x, grid_y)] = (tile_pkey, site_as_tile_pkey) def", "None # Assign each REBUF tile its above and below", "'0' # This is the value to attach to the", "track WHERE alive = 1;\"): alive_tracks.add(track_pkey) print('{} Importing alive tracks'.format(now()))", "m is not None, pin_name tile_type = m.group(1) tile_type =", "need of placing them, therefore, # when the relative pip", "feature_path[0], feature_path[-1] ) return ' '.join((feature, enable_feature)) if BUFG_CLK_OUT_REGEX.fullmatch(feature_path[-1]): enable_feature", ") src_wire_in_tile_pkey, dest_wire_in_tile_pkey = cur.fetchone() cur.execute( \"\"\"SELECT name FROM wire_in_tile", "VPR coords terms. \"above\" and \"below\" mean the opposite... 
rebuf_to_hrow_map", "is active the IOB operates in the differential output mode.", "graph_edge; \"\"\"): if src_graph_node not in node_mapping: continue if dest_graph_node", ") print('{} generating routing graph for ROI.'.format(now())) elif args.graph_limit: use_roi", "m = CASCOUT_REGEX.fullmatch(feature_path[-2]) if m: enable_cascout = '{}.CASCOUT_{}_ACTIVE'.format( feature_path[0], m.group(1)", "t['port_type'] == 'output', synth_tile['pins'] ) ) ) num_outpad = len(", "# # The \"DIFF_OUT\" cannot be set in the architecture", "node_mapping node_mapping[graph_node_pkey] = graph.add_track( track=track, segment_id=segment_id, ptc=ptc, timing=graph2.NodeTiming( r=resistance, c=capacitance,", "Find nodes touching rebuf wires. cur.execute( \"\"\" WITH rebuf_wires(wire_in_tile_pkey) AS", "enable_oclkm_feature = '{}.IOI_OCLKM_{}.{}'.format( feature_path[0], m.group(1), feature_path[-1] ) return ' '.join((feature,", "FROM phy_tile WHERE pkey = ( SELECT phy_tile_pkey FROM wire", "and IPIN, OPIN, CHANX and CHANY rr_node ids in the", "conn.cursor() # Find CLK_HROW_TOP_R, CLK_HROW_TOP_R and REBUF tiles. rebuf_and_hrow_tiles =", "channels_obj) x_dim, y_dim = phy_grid_dims(conn) connection_box_obj = graph.create_connection_box_object( x_dim=x_dim, y_dim=y_dim", "create_node_remap(capnp_graph.graph.nodes, channels_obj) x_dim, y_dim = phy_grid_dims(conn) connection_box_obj = graph.create_connection_box_object( x_dim=x_dim,", "= ?;\"\"\", (pip_pkey, ) ) src_wire_in_tile_pkey, dest_wire_in_tile_pkey = cur.fetchone() cur.execute(", "SELECT connection_box_wire_pkey FROM graph_node WHERE pkey = ?\"\"\", (graph_node_pkey, )", "'A' >>> reduce_connection_box('B') 'B' >>> reduce_connection_box('B_L') 'B' \"\"\" box =", "feature_path[2] == \"IOB_DIFFO_OUT0\" and \\ feature_path[1] == \"IOB_DIFFO_IN1\": return '{}.OUT_DIFF'.format(feature_path[0])", "(('fasm_features', feature), ) ) else: yield (src_node, sink_node, switch_id, ())", "tracks.Direction.RIGHT: assert right_graph_node_pkey is not None, (tile_type, pin_name) node_mapping[right_graph_node_pkey] =", "args.vpr_capnp_schema_dir, 'rr_graph_uxsdcxx.capnp' ), input_file_name=args.read_rr_graph, progressbar=progressbar_utils.progressbar, output_file_name=args.write_rr_graph, ) graph = capnp_graph.graph", "are handled below. 
if pin_name.startswith('SYN-'): set_connection_box( graph, node_idx, node.loc.x_low, node.loc.y_low,", "pin_name = graph.create_pin_name_from_tile_type_and_pin( tile_type, wire ) pin_node = graph.get_nodes_for_pin( tuple(synth_tile['loc']),", "cur.execute(\"SELECT count() FROM graph_edge;\" \"\") for src_graph_node, dest_graph_node in cur.execute(\"\"\"", "Roi import prjxray.grid as grid from lib.rr_graph import graph2 from", "is not None and tile.startswith(\"CLK_HROW\"): return tile return None #", "hclk_cmt_x > 0: hclk_cmt_x -= 1 gridinfo = grid.gridinfo_at_loc((hclk_cmt_x, hclk_cmt_y))", "import math import prjxray.db from prjxray.roi import Roi import prjxray.grid", "return (src_net, dest_net) return get_pip_wire_names def get_number_graph_edges(conn, graph, node_mapping): num_edges", "is not None, (tile_type, pin_name) node_mapping[bottom_graph_node_pkey] = node.id update_connection_box( conn,", "= node_mapping[dest_graph_node] if pip_name is not None: feature = check_feature(pip_name)", "return ' '.join((feature, enable_feature)) if CCIO_ACTIVE_REGEX.fullmatch(feature_path[-1]): features = [feature] features.append(", "use_roi = True x_min, y_min, x_max, y_max = map(int, args.graph_limit.split(','))", "map(int, args.graph_limit.split(',')) roi = Roi( db=db, x1=x_min, y1=y_min, x2=x_max, y2=y_max,", "False, \"Impossible to determine X location of BUFHCE\" y_loc =", "sorted according to their Y coordinates. \"\"\" cur.execute( \"\"\" SELECT", "routing fabric to the rr graph. For ROI configurations, this", "be present from the input rr_graph. Create a mapping between", "by merging some. Examples: >>> reduce_connection_box('IMUX0') 'IMUX' >>> reduce_connection_box('IMUX1') 'IMUX'", "IOI tiles. # # The difference is that the TOP", "if dest_graph_node not in node_mapping: continue if pip_pkey is not", "Serializing to disk.'.format(now())) capnp_graph.serialize_to_capnp( channels_obj=channels_obj, connection_box_obj=connection_box_obj, num_nodes=len(capnp_graph.graph.nodes), nodes_obj=yield_nodes(capnp_graph.graph.nodes), num_edges=num_edges, edges_obj=import_graph_edges(conn,", "= graph.get_segment_id_from_name('dummy') create_track_rr_graph( conn, graph, node_mapping, use_roi, roi, synth_tiles, segment_id", "enumerate(nodes): yield node if idx % 1024 == 0: bar.update(idx)", "to accomodate this # need, as the IOI_SING tiles have", "X location of BUFHCE\" y_loc = m.group(2) bufhce_loc = 'BUFHCE_X{}Y{}'.format(x_loc,", "pkey = ?;\"\"\", (src_wire_in_tile_pkey, ) ) (src_net, ) = cur.fetchone()", "required=True, help='Input rr_graph file' ) parser.add_argument( '--write_rr_graph', required=True, help='Output rr_graph", "cur.execute( \"\"\" SELECT pkey, tile_type_pkey, name FROM wire_in_tile WHERE pkey", "grid_x, grid_y = cur.fetchone() cur.execute( \"SELECT wire_in_tile_pkey FROM wire WHERE", "print('{} Serializing to disk.'.format(now())) capnp_graph.serialize_to_capnp( channels_obj=channels_obj, connection_box_obj=connection_box_obj, num_nodes=len(capnp_graph.graph.nodes), nodes_obj=yield_nodes(capnp_graph.graph.nodes), num_edges=num_edges,", "(pin, len(option)) if pin['port_type'] == 'input': tile_type = synth_tile['tile_name'] wire", "phy_tile WHERE pkey = ?; \"\"\", (tile_pkey, ) ) return", "wire_in_tile WHERE name = ? 
AND phy_tile_type_pkey IN ( SELECT", "y_list.append(graph2.ChannelList(idx, info)) return graph2.Channels( chan_width_max=chan_width_max, x_min=x_min, y_min=y_min, x_max=x_max, y_max=y_max, x_list=x_list,", "synth_tiles['tiles'].items(): num_inpad = len( list( filter( lambda t: t['port_type'] ==", "gnd_block_id = block_types['SYN-GND'] vcc_block_id = block_types['SYN-VCC'] gnd_loc = None vcc_loc", "part') parser.add_argument( '--read_rr_graph', required=True, help='Input rr_graph file' ) parser.add_argument( '--write_rr_graph',", "'_{}'.format(dst_value)) changed_feature = \"{}{}\".format(dst_value, src_wire) feature = \"{}{}\".format(unchanged_feature, changed_feature) feature_path", "import os.path from hilbertcurve.hilbertcurve import HilbertCurve import math import prjxray.db", "graph, node_mapping) print('{} Serializing to disk.'.format(now())) capnp_graph.serialize_to_capnp( channels_obj=channels_obj, connection_box_obj=connection_box_obj, num_nodes=len(capnp_graph.graph.nodes),", ") ) (dest_net, ) = cur.fetchone() return (src_net, dest_net) return", "get_track_model import lib.rr_graph_capnp.graph2 as capnp_graph2 from prjxray_constant_site_pins import feature_when_routed from", "side == tracks.Direction.LEFT: assert left_graph_node_pkey is not None, (tile_type, pin_name)", "t_del=intrinsic_delay, p_cost=penalty_cost, ), sizing=graph2.SwitchSizing( mux_trans_size=0, buf_size=0, ), ) ) #", "for name, internal_capacitance, drive_resistance, intrinsic_delay, penalty_cost, \\ switch_type in cur.execute(\"\"\"", "wire_in_tile_pkey return get_site_as_tile_wire def import_graph_nodes(conn, graph, node_mapping, connection_box_map): cur =", "FROM graph_node WHERE connection_box_wire_pkey IS NOT NULL ) );\"\"\" )", "= rebuf_wire_regexp.fullmatch(rebuf_wire_name) if m.group(2) == 'TOP': REBUF_NODES[node_pkey].append( '{}.GCLK{}_ENABLE_BELOW'.format(rebuf_tile, m.group(1)) )", "= HilbertCurve(p, N) idx = 0 id_map = {} for", "wire = 'inpad' elif pin['port_type'] == 'VCC': tile_type = 'SYN-VCC'", "create_get_site_as_tile_wire(cur) for node_idx, node in enumerate(graph.nodes): if node.type not in", "= create_get_site_as_tile_wire(cur) for node_idx, node in enumerate(graph.nodes): if node.type not", "# despite they are found on the TOP or BOTTOM", "pip name that # changes based on which IOI_SING is", "This is the value to attach to the source pip", "graph2.Node(**node_dict) def update_connection_box( conn, graph, graph_node_pkey, node_idx, connection_box_map ): \"\"\"", "sqlite3 now = datetime.datetime.now HCLK_CK_BUFHCLK_REGEX = re.compile('HCLK_CK_BUFHCLK[0-9]+') CLK_HROW_CK_MUX_REGEX = re.compile('CLK_HROW_CK_MUX_OUT_([LR])([0-9]+)')", "IOB33S sites. # They are used in differential input mode.", "bottom) # # Example: IOI_OLOGIC0_D1.IOI_IMUX34_0 -> IOI_OLOGIC0_D1.IOI_IMUX34_1 src_value = '1'", "for tile, wire_name in cur: REBUF_SOURCES[(tile, wire_name)] = node_pkey HCLK_CMT_TILES", "grid_x, grid_y in cur.execute(\"\"\" SELECT pkey, site_as_tile_pkey, grid_x, grid_y FROM", "FROM segment WHERE pkey = ( SELECT segment_pkey FROM track", "only if its there on the list. 
\"\"\" tile =", ");\"\"\" ) connection_box_map = {} for wire_in_tile_pkey, tile_type_pkey, wire_name in", "results = cur.fetchall() assert len(results) == 1 wire_in_tile_pkey = results[0][0]", "x_high, y_low, y_high, ptc, capacitance, resistance) in progressbar_utils.progressbar(cur.execute(\"\"\" SELECT pkey,", "FROM wire_in_tile WHERE pkey = ?;\"\"\", (src_wire_in_tile_pkey, ) ) (src_net,", "alive_tracks: continue cur2.execute( \"\"\" SELECT name FROM segment WHERE pkey", "50 to get the relative # position of the IOI_SING", "= {} for block_type in graph.block_types: block_types[block_type.name] = block_type.id assert", "== \"IOB_DIFFI_IN0\": return '' # REBUF stuff rebuf_key = (feature_path[0],", "create_channels(conn): cur = conn.cursor() cur.execute( \"\"\" SELECT chan_width_max, x_min, x_max,", "len( list( filter( lambda t: t['port_type'] == 'input', synth_tile['pins'] )", "enable_bufhce_zinv_ce = '{}.BUFHCE.{}.ZINV_CE=1\\'b1'.format( feature_path[0], bufhce_loc ) return ' '.join((feature, enable_bufhce_in_use,", "site_as_tile_pkey = get_tile_and_site_as_tile_pkey( node.loc.x_low, node.loc.y_low ) if site_as_tile_pkey is not", "hclk_cmt_y)) if gridinfo.tile_type == 'HCLK_CMT': HCLK_CMT_TILES[tile, 'L'] = grid.tilename_at_loc( (hclk_cmt_x,", "? AND phy_tile_type_pkey IN ( SELECT tile_type_pkey FROM phy_tile WHERE", ") = cur.fetchone() get_tile_name = create_get_tile_name(conn) get_pip_wire_names = create_get_pip_wire_names(conn) switch_name_map", "a normal IOI tile, while the BOTTOM IOI_SING # shares", "cur.execute( \"\"\"SELECT name FROM wire_in_tile WHERE pkey = ?;\"\"\", (dest_wire_in_tile_pkey,", "SYN-GND and SYN-VCC tiles. \"\"\" block_types = {} for block_type", "ids in the rr_graph. Add rr_edge for each row in", "edges print('{} Importing existing edges.'.format(now())) for edge in graph.edges: yield", "tracks.Track( direction=direction, x_low=x_low, x_high=x_high, y_low=y_low, y_high=y_high, ) assert graph_node_pkey not", "t: t['port_type'] == 'input', synth_tile['pins'] ) ) ) for pin", "site_pin_switch_pkey FROM wire_in_tile WHERE pkey = ( SELECT wire_in_tile_pkey FROM", "wire.node_pkey = rebuf_nodes.node_pkey INNER JOIN wire_in_tile ON wire_in_tile.pkey = wire.wire_in_tile_pkey", "FROM switch WHERE pkey = ?;\"\"\", (switch_pkey, ) ) (switch_name,", "return [t[0] for t in cur.fetchall()] def populate_bufg_rebuf_map(conn): global REBUF_NODES", "return get_tile_name def create_get_pip_wire_names(conn): cur = conn.cursor() @functools.lru_cache(maxsize=None) def get_pip_wire_names(pip_pkey):", "# # Vivado does not report this connection as a", "(track_pkey, ) = cur.fetchone() elif pin['port_type'] == 'GND': cur.execute('SELECT gnd_track_pkey", "parser = argparse.ArgumentParser() parser.add_argument( '--db_root', required=True, help='Project X-Ray Database' )", "get_pip_wire_names = create_get_pip_wire_names(conn) switch_name_map = {} print('{} Importing edges from", "with open(args.synth_tiles) as f: synth_tiles = json.load(f) roi = Roi(", "region column is_bottom_sing = int(m.group(2)) % 50 == 0 #", "help='Output rr_graph file' ) parser.add_argument( '--write_rr_node_map', required=True, help='Output map of", "open(args.synth_tiles) as f: synth_tiles = json.load(f) roi = Roi( db=db,", "gridinfo = grid.gridinfo_at_loc((hclk_cmt_x, hclk_cmt_y)) if gridinfo.tile_type == 'HCLK_CMT_L': HCLK_CMT_TILES[tile, 'R']", "pkey = ( SELECT site_wire_pkey FROM node WHERE pkey =", "= tracks.Track( direction=direction, x_low=x_low, x_high=x_high, y_low=y_low, y_high=y_high, ) assert 
graph_node_pkey", "vcc_block_id == grid_loc.block_type_id: assert vcc_loc is None vcc_loc = (grid_loc.x,", "KeyError: capnp_graph.add_switch( graph2.Switch( id=None, name=name, type=graph2.SwitchType[switch_type.upper()], timing=graph2.SwitchTiming( r=drive_resistance, c_in=0.0, c_out=0.0,", "phy_tile_pkey FROM wire WHERE pkey = ? )\"\"\", (connection_box_wire_pkey, )", "pkey FROM track WHERE alive = 1;\"): alive_tracks.add(track_pkey) print('{} Importing", "inode file' ) parser.add_argument( '--connection_database', help='Database of fabric connectivity', required=True", "\"IOB_DIFFO_IN1\": return '{}.OUT_DIFF'.format(feature_path[0]) # IOB_PADOUT0->IOB_DIFFI_IN1 # IOB_PADOUT1->IOB_DIFFI_IN0 # # These", "> 0, (pin, len(option)) if pin['port_type'] == 'input': tile_type =", "re.compile('IOI_OCLK_([01])') # Regex for [LR]IOI_SING tiles IOI_SITE_PIPS = ['OLOGIC', 'ILOGIC',", "'--synth_tiles', help='If using an ROI, synthetic tile defintion from prjxray-arch-import'", "ZINV_CE: to disable the inverter on CE input which is", "cur.execute( \"\"\" SELECT site.site_type_pkey, site_as_tile.site_pkey FROM site_as_tile INNER JOIN site", "\"\"\" Check if enabling this feature requires other features to", "IOB operates in the differential output mode. # There is", "from database.'.format(now())) with progressbar_utils.ProgressBar(max_value=num_edges) as bar: for idx, (src_graph_node, dest_graph_node,", "FROM rebuf_wires) ORDER BY rebuf_nodes.node_pkey;\"\"\" ) for node_pkey, rebuf_tile, rebuf_wire_name", "graph.block_types: block_types[block_type.name] = block_type.id assert 'SYN-GND' in block_types assert 'SYN-VCC'", "segment_id = graph.get_segment_id_from_name('dummy') create_track_rr_graph( conn, graph, node_mapping, use_roi, roi, synth_tiles,", "xml, and so # were not emitted in rrgraph XML.", "elif pin['port_type'] == 'VCC': cur.execute('SELECT vcc_track_pkey FROM constant_sources') (track_pkey, )", "AND name = ? ) AND site_pkey = ? ;\"\"\",", "\"\"\" tile = rebuf_and_hrow_tiles[i] if tile is not None and", "of the whole # IOI/IOB column. The Y coordinate identified", "# There is no feature assosciated with that PIP in", "# These connections are hard wires that connect IOB33M and", "the rr graph. For ROI configurations, this also connects the", "rebuf_wires(wire_in_tile_pkey) AS ( SELECT pkey FROM wire_in_tile WHERE name LIKE", "assert len(results) == 1, site_as_tile_pkey return results[0] @functools.lru_cache(maxsize=0) def get_site_as_tile_wire(site_as_tile_pkey,", "count() FROM graph_edge;\" \"\") for src_graph_node, dest_graph_node in cur.execute(\"\"\" SELECT", "grid_x, grid_y, box_id, site_pin_delay ): \"\"\" Assign a connection box", "bottom half of a normal IOI tile, while the BOTTOM", "backward) in enumerate(cur.execute(\"\"\" SELECT src_graph_node_pkey, dest_graph_node_pkey, switch_pkey, phy_tile_pkey, pip_in_tile_pkey, backward", "SELECT track_pkey FROM node WHERE pkey = ( SELECT node_pkey", "dimensions. 
\"\"\" cur = conn.cursor() cur.execute(\"SELECT grid_x FROM phy_tile ORDER", "node_pkey FROM wire WHERE wire_in_tile_pkey IN (SELECT wire_in_tile_pkey FROM rebuf_wires)", "% 50 == 0 # This is the value to", "'is_clock': False, }, ], }, \"GND\": { 'loc': gnd_loc, 'pins':", "tracks from lib.connection_database import get_wire_pkey, get_track_model import lib.rr_graph_capnp.graph2 as capnp_graph2", "\"\"\" SELECT top_graph_node_pkey, bottom_graph_node_pkey, left_graph_node_pkey, right_graph_node_pkey FROM wire WHERE wire_in_tile_pkey", "relative # position of the IOI_SING within the clock region", "(tile_type, pin_name) node_mapping[right_graph_node_pkey] = node.id update_connection_box( conn, graph, right_graph_node_pkey, node_idx,", "else: yield (src_node, sink_node, switch_id, ()) else: yield (src_node, sink_node,", "coords terms. \"above\" and \"below\" mean the opposite... rebuf_to_hrow_map =", "wire_in_tile WHERE pkey = ?;\"\"\", (src_wire_in_tile_pkey, ) ) (src_net, )", ") def reduce_connection_box(box): \"\"\" Reduce the number of connection boxes", "x_list = [] for idx, info in cur: x_list.append(graph2.ChannelList(idx, info))", "# is a pip. Instead of making it a pseudo-pip", "and so # were not emitted in rrgraph XML. #", "in graph.grid: if gnd_block_id == grid_loc.block_type_id: assert gnd_loc is None", "# Example: IOI_OLOGIC0_D1.IOI_IMUX34_0 -> IOI_OLOGIC0_D1.IOI_IMUX34_1 src_value = '1' if is_bottom_sing", "FROM wire WHERE pkey IN ( SELECT connection_box_wire_pkey FROM graph_node", "def get_tile_and_site_as_tile_pkey(x, y): return tiles[(x, y)] return get_tile_and_site_as_tile_pkey def create_get_site_as_tile_wire(cur):", "edges from database.'.format(now())) with progressbar_utils.ProgressBar(max_value=num_edges) as bar: for idx, (src_graph_node,", "cur = conn.cursor() # Find CLK_HROW_TOP_R, CLK_HROW_TOP_R and REBUF tiles." ]
[ "s = 4) cbar = f.colorbar(mlw, shrink=0.7) cbar.set_label(r'% recovered') f.savefig('./plots/'", "[] for i, f in enumerate(files): print(round(i/len(files),4), f) fl =", "NO crowding # New script copied from quest - want", "for f in filters: m1hRec[f] = np.zeros_like(mbins)[1:] qhRec[f] = np.zeros_like(qbins)[1:]", "np.diff(m1b) m1val = m1b[:-1] + dm1/2. fb = np.sum(m1hAll0/len(data.index)*fbFit(m1val)) #account", "= 'tight') f, ax = plt.subplots(subplot_kw={'projection': \"mollweide\"}, figsize=(8,5)) ax.grid(True) #ax.set_xlabel(r\"$l$\",fontsize=16)", "log):\",np.sum(rawN), np.log10(np.sum(rawN))) print(\"total observable (raw, log):\",np.sum(obsN), np.log10(np.sum(obsN))) print(\"total recovered (raw,", "qhRec0, qb = np.histogram(rec[\"m2\"]/rec[\"m1\"], bins=qbins) ehRec0, eb = np.histogram(rec[\"e\"], bins=ebins)", "= np.insert(histObs,0,0) for f in filters: histRec[f] = np.insert(histRec[f],0,0) #PDF", "print(\"total in Prsa 15.8<r<19.5 P<1000d sample (raw, log):\",np.sum(allNPrsa), np.log10(np.sum(allNPrsa))) print(\"total", "bins, and using this: https://sites.uni.edu/morgans/astro/course/Notes/section2/spectralmasses.html y = [0.20, 0.35, 0.50,", "[] pAll = [] pObs = [] pRec = []", "gatspy (raw, log):\",np.sum(fileRecN), np.log10(np.sum(fileRecN))) print(\"recovered/observable*100 with gatspy:\",np.sum(fileRecN)/np.sum(fileObsN)*100.) print(\"###################\") print(\"total in", "= np.histogram(rec[\"appMagMean_r\"], bins=magbins) rhRec0, rb = np.histogram(rec[\"r2\"]/rec[\"r1\"], bins=rbins) m1hRec[filt] +=", "= getPhs(header['clusterVdisp'].iloc[0]*units.km/units.s).to(units.day).value fb *= RagNormal(np.log10(Phs), cdf = True) print(\"fb, Phs", "-20, np.log10(365*10.)) intNorm = intCut/intAll #cutoff in percent error for", "[0.20, 0.35, 0.50, 0.70, 0.75] init = models.PowerLaw1D(amplitude=0.5, x_0=1, alpha=-1.)", "bin_edges, xtitle, fname, filters = ['u_', 'g_', 'r_', 'i_', 'z_',", "boundary Phs = getPhs(header['clusterVdisp'].iloc[0]*units.km/units.s).to(units.day).value fb *= RagNormal(np.log10(Phs), cdf = True)", "f in filters: histRec[f] = np.insert(histRec[f],0,0) #PDF ax1.step(bin_edges, histAll/np.sum(histAll), color=c1)", "hard-soft boundary Phs = getPhs(header['clusterVdisp'].iloc[0]*units.km/units.s).to(units.day).value fb *= RagNormal(np.log10(Phs), cdf =", "fecc.savefig('./plots/' + 'eccPDFall.pdf',format='pdf', bbox_inches = 'tight') flper.savefig('./plots/' + 'lperPDFall.pdf',format='pdf', bbox_inches", "#cutoff in percent error for \"recovered\" Pcut = 0.1 #assumed", "the histograms #All m1hAll = np.zeros_like(mbins)[1:] qhAll = np.zeros_like(qbins)[1:] ehAll", "Nobs = len(obs.index) prsaObs = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r']", "Nrec = 0. Nobs = 0. raN = 0. 
obN", "# Need to account for limit in input period #########################", "& (data['LSM_PERIOD'] != -999) & ( (fullP < Pcut) |", "filters = ['u_', 'g_', 'r_', 'i_', 'z_', 'y_','all']): c1 =", "eb = np.histogram(rec[\"e\"], bins=ebins) lphRec0, lpb = np.histogram(np.ma.log10(rec[\"p\"].values).filled(-999), bins=lpbins) dhRec0,", "#plot and save the histograms saveHist(m1hAll, m1hObs, m1hRec, m1b, 'm1", "prsa = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] > 15.8) &", "= 'tight') if (doIndividualPlots): fmass.savefig('./plots/' + 'massPDFall.pdf',format='pdf', bbox_inches = 'tight')", "dhObs += dhObs0/Nall*Nmult maghObs += maghObs0/Nall*Nmult rhObs += rhObs0/Nall*Nmult #Rec", "matplotlib matplotlib.use('Agg') doIndividualPlots = True from matplotlib import pyplot as", "dhAll0/np.sum(dhAll0), color='black', alpha=0.1) axmag.step(magb[0:-1], maghAll0/np.sum(maghAll0), color='black', alpha=0.1) axrad.step(rb[0:-1], rhAll0/np.sum(rhAll0), color='black',", "lphAll0, lpb = np.histogram(np.ma.log10(data[\"p\"].values).filled(-999), bins=lpbins) dhAll0, db = np.histogram(data[\"d\"], bins=dbins)", "= eccAll peccAll['p'] = pAll # Observable dataframe peccObs['e'] =", "Lists for period and eccentricity for Andrew's circularization plots eccAll", "shrink=0.7) cbar.set_label(r'log10(N) recovered') f.savefig('./plots/' + 'mollweide_N.pdf',format='pdf', bbox_inches = 'tight') if", "take p and ecc from each population (all, obs, rec)", "for Andrew's circularization plots eccAll = [] eccObs = []", "'log(P [days])', 'EBLSST_lphist') saveHist(dhAll, dhObs, dhRec, db, 'd (kpc)', 'EBLSST_dhist')", "Baseline M67 long script -- NO crowding # New script", "baseline/colossus, crowding/no crowding) peccAll.to_csv('./pecc/all-M67BN-ecc-p.csv', header = ['e', 'p']) peccObs.to_csv('./pecc/obs-M67BN-ecc-p.csv', header", "if (filt == 'all'): Nrec = len(recCombined.index) rF = Nrec/Nall", "(Andrew's AAS Poster) c2 = '#A62B1F' #Dai Red c3 =", "but limiting by the hard-soft boundary def fitRagfb(): x =", "(raw, log):\",np.sum(obsN), np.log10(np.sum(obsN))) print(\"total recovered (raw, log):\",np.sum(recN), np.log10(np.sum(recN))) print(\"recovered/observable*100:\",np.sum(recN)/np.sum(obsN)*100.) print(\"###################\")", "Nlim): m1hObs0, m1b = np.histogram(obs[\"m1\"], bins=mbins) qhObs0, qb = np.histogram(obs[\"m2\"]/obs[\"m1\"],", "fmass.savefig('./plots/' + 'massPDFall.pdf',format='pdf', bbox_inches = 'tight') fqrat.savefig('./plots/' + 'qPDFall.pdf',format='pdf', bbox_inches", "m2=1*units.solMass, m3=0.5*units.solMass): Phs = np.pi*constants.G/np.sqrt(2.)*(m1*m2/m3)**(3./2.)*(m1 + m2)**(-0.5)*sigma**(-3.) 
return Phs.decompose().to(units.day) #similar", "np.arange(0,1, 0.1, dtype='float') ebins = np.arange(0, 1.05, 0.05, dtype='float') lpbins", "= plt.subplots() flper, axlper = plt.subplots() fdist, axdist = plt.subplots()", "pd.read_csv(d+f, nrows=1) ###################### #NEED TO ACCOUNT FOR THE BINARY FRACTION", "f.savefig('./plots/' + 'mollweide_pct.pdf',format='pdf', bbox_inches = 'tight') f, ax = plt.subplots(subplot_kw={'projection':", "np.histogram(np.ma.log10(rec[\"p\"].values).filled(-999), bins=lpbins) dhRec0, db = np.histogram(rec[\"d\"], bins=dbins) maghRec0, magb =", "and put them into separate file # Doing this so", "bGal.ravel()*np.pi/180., c=np.log10(np.array(recN)), cmap='viridis_r', s = 4) ax.set_xlabel(\"RA\",fontsize=16) ax.set_ylabel(\"Dec\",fontsize=16) mlw =", "enumerate(files): print(round(i/len(files),4), f) fl = file_len(d+f) if (fl >= 4):", "and using this: https://sites.uni.edu/morgans/astro/course/Notes/section2/spectralmasses.html y = [0.20, 0.35, 0.50, 0.70,", "import matplotlib matplotlib.use('Agg') doIndividualPlots = True from matplotlib import pyplot", "All dataframe peccAll['e'] = eccAll peccAll['p'] = pAll # Observable", "allNPrsa = [] obsNPrsa = [] recNPrsa = [] #", "rb, 'r2/r1', 'EBLSST_rhist') #make the mollweide coords = SkyCoord(RA, Dec,", "c3 = '#BF8A26' #Dali Beige fig,ax1 = plt.subplots(figsize=(8,6), sharex=True)#can change", "m1hObs0, m1b = np.histogram(obs[\"m1\"], bins=mbins) qhObs0, qb = np.histogram(obs[\"m2\"]/obs[\"m1\"], bins=qbins)", "= len(recCombined.index) rF = Nrec/Nall rN = Nrec/Nall*Nmult raN =", "+ 'mollweide_pct.pdf',format='pdf', bbox_inches = 'tight') f, ax = plt.subplots(subplot_kw={'projection': \"mollweide\"},", "the only place I need to normalize?) prsa = data.loc[(data['appMagMean_r']", "= 25 mbins = np.arange(0,10, 0.1, dtype='float') qbins = np.arange(0,1,", "x_0=1, alpha=-1.) fitter = fitting.LevMarLSQFitter() fit = fitter(init, x, y)", "| (twiceP < Pcut))] prsaRec = data.loc[(data['appMagMean_r'] <= 19.5) &", "0.5 #minimum number of lines to consider in file Nlim", "-999) & ( (fullP < Pcut) | (halfP < Pcut)", "= np.zeros_like(rbins)[1:] RA = [] Dec = [] recFrac =", "THE BINARY FRACTION when combining histograms ##################### Nmult = header['clusterMass'][0]/mMean", "= ['e', 'p']) peccRec = pd.DataFrame(columns = ['e', 'p']) #Read", "filters: outline += ','+str(histRec[f][i]) outline += '\\n' fl.write(outline) if __name__", "'d (kpc)', 'EBLSST_dhist') saveHist(maghAll, maghObs, maghRec, magb, 'mag', 'EBLSST_maghist') saveHist(rhAll,", "#make the mollweide coords = SkyCoord(RA, Dec, unit=(units.degree, units.degree),frame='icrs') lGal", "filters: # cdfRec[f] = [] # for i in range(len(histAll)):", "3 letter code corresponds to scenario (OC/GC, baseline/colossus, crowding/no crowding)", "header header = pd.read_csv(d+f, nrows=1) ###################### #NEED TO ACCOUNT FOR", "print(\"###################\") print(\"total in Prsa 15.8<r<19.5 P<1000d sample (raw, log):\",np.sum(allNPrsa), np.log10(np.sum(allNPrsa)))", "return fit def RagNormal(x, cdf = False): mean = 5.03", "Nrec, np.sum(lphRec0)/Nrec, np.sum(lphObs), np.sum(obsN), np.sum(lphObs)/np.sum(obsN)) # Concatenating p and ecc", "np.log10(np.sum(fileRecN))) print(\"recovered/observable*100 with gatspy:\",np.sum(fileRecN)/np.sum(fileObsN)*100.) 
print(\"###################\") print(\"total in sample (raw, log):\",np.sum(rawN),", "to account for all filters here to have more accurate", "3 files for each sub-population - append everything to these", "peccAll['p'] = pAll # Observable dataframe peccObs['e'] = eccObs peccObs['p']", "= pd.DataFrame() for filt in filters: key = filt+'LSS_PERIOD' if", "for all filters here to have more accurate numbers recCombined", "= 0.5 # ax2.step(bin_edges, cdfRec[f], color=c3, linewidth=lw) #ax2.set_ylabel('CDF') #ax2.set_xlabel(xtitle) fig.subplots_adjust(hspace=0)", "#cdfRec = dict() #for f in filters: # cdfRec[f] =", "0. firN = 0. NallPrsa = 0. NobsPrsa = 0.", "+= maghRec0/Nall*Nmult rhRec[filt] += rhRec0/Nall*Nmult #for the mollweide if (filt", "M67 long script -- NO crowding # New script copied", "= dict() rhRec = dict() for f in filters: m1hRec[f]", "__name__ == \"__main__\": filters = ['u_', 'g_', 'r_', 'i_', 'z_',", "recNPrsa = [] # Lists for period and eccentricity for", "(f == 'all'): lw = 0.5 ax1.step(bin_edges, histRec[f]/np.sum(histRec[f]), color=c3, linewidth=lw)", "= NobsPrsa/Nall*Nmult NallPrsa = NallPrsa/Nall*Nmult recFrac.append(rF) recN.append(rN) rawN.append(raN) obsN.append(obN) fileN.append(fiN)", "copied from quest - want to take p and ecc", "file Nlim = 3 if (doIndividualPlots): fmass, axmass = plt.subplots()", "0. Nobs = 0. raN = 0. obN = 0.", "all filters eccRec.append(prsaRec['e'].values) pRec.append(prsaRec['p'].values) if (filt == 'all'): recCombined.drop_duplicates(inplace=True) prsaRecCombined.drop_duplicates(inplace=True)", "= np.concatenate(eccRec) pAll = np.concatenate(pAll) pObs = np.concatenate(pObs) pRec =", "axrad.step(rb[0:-1], rhAll0/np.sum(rhAll0), color='black', alpha=0.1) #account for the binary fraction, as", "open('./eblsst_files/' + fname+'.csv','w') as fl: outline = 'binEdges,histAll,histObs' for f", "of midpoints in bins, and using this: https://sites.uni.edu/morgans/astro/course/Notes/section2/spectralmasses.html y =", "pRec) # Appending lists with all the p/ecc values to", "= np.zeros_like(magbins)[1:] rhAll = np.zeros_like(rbins)[1:] #Observable m1hObs = np.zeros_like(mbins)[1:] qhObs", "dataframes # All dataframe peccAll['e'] = eccAll peccAll['p'] = pAll", "maghAll = np.zeros_like(magbins)[1:] rhAll = np.zeros_like(rbins)[1:] #Observable m1hObs = np.zeros_like(mbins)[1:]", "don't have to run analyse each time # Can write", "ax1.set_yscale('log') ax1.set_title('Globular Clusters - Baseline', fontsize = 16) ax1.set_xlabel(xtitle) #CDF", "# print('Final Dataframes:', peccAll, peccObs, peccRec) # print(peccRec.columns) # 3", "magb = np.histogram(data[\"appMagMean_r\"], bins=magbins) rhAll0, rb = np.histogram(data[\"r2\"]/data[\"r1\"], bins=rbins) if", "flper, axlper = plt.subplots() fdist, axdist = plt.subplots() fmag, axmag", "eccAll = [] eccObs = [] eccRec = [] pAll", "= file_len(d+f) if (fl >= 4): #read in the header", "qhRec0/Nall*Nmult ehRec[filt] += ehRec0/Nall*Nmult lphRec[filt] += lphRec0/Nall*Nmult dhRec[filt] += dhRec0/Nall*Nmult", "rhAll0/Nall*Nmult #Obs obs = data.loc[data['LSM_PERIOD'] != -999] Nobs = len(obs.index)", "pAll = [] pObs = [] pRec = [] #", "p/ecc values to our dataframes # All dataframe peccAll['e'] =", "Pcut) | (halfP < Pcut) | (twiceP < Pcut))] Nrec", "if (filt == 'all'): recCombined.drop_duplicates(inplace=True) prsaRecCombined.drop_duplicates(inplace=True) if (Nrec >= Nlim):", "len(recCombined.index) rF = Nrec/Nall rN = Nrec/Nall*Nmult raN = Nmult", "with gatspy (raw, log):\",np.sum(fileObsN), np.log10(np.sum(fileObsN))) 
print(\"number of binaries in recovered", "pyplot as plt def file_len(fname): i = 0 with open(fname)", "0.5)] # Appending for Andrew eccAll.append(prsa['e'].values) pAll.append(prsa['p'].values) NallPrsa = len(prsa.index)", "= np.histogram(obs[\"appMagMean_r\"], bins=magbins) rhObs0, rb = np.histogram(obs[\"r2\"]/obs[\"r1\"], bins=rbins) m1hObs +=", "#estimates of midpoints in bins, and using this: https://sites.uni.edu/morgans/astro/course/Notes/section2/spectralmasses.html y", "ax1.step(bin_edges, histAll/np.sum(histAll), color=c1) ax1.step(bin_edges, histObs/np.sum(histObs), color=c2) for f in filters:", "fl.write(outline) if __name__ == \"__main__\": filters = ['u_', 'g_', 'r_',", "axecc = plt.subplots() flper, axlper = plt.subplots() fdist, axdist =", "(data['p'] < 1000) & (data['p'] >0.5) & (data['LSM_PERIOD'] != -999)", "to a text file with open('./eblsst_files/' + fname+'.csv','w') as fl:", "rhObs = np.zeros_like(rbins)[1:] #Recovered m1hRec = dict() qhRec = dict()", "= np.zeros_like(mbins)[1:] qhRec[f] = np.zeros_like(qbins)[1:] ehRec[f] = np.zeros_like(ebins)[1:] lphRec[f] =", "= pd.read_csv(d+f, header = 2).fillna(-999) rF = 0. rN =", "#Dali Beige fig,ax1 = plt.subplots(figsize=(8,6), sharex=True)#can change to include cdf", "lpb, 'log(P [days])', 'EBLSST_lphist') saveHist(dhAll, dhObs, dhRec, db, 'd (kpc)',", "= np.histogram(obs[\"m1\"], bins=mbins) qhObs0, qb = np.histogram(obs[\"m2\"]/obs[\"m1\"], bins=qbins) ehObs0, eb", "np.zeros_like(mbins)[1:] qhRec[f] = np.zeros_like(qbins)[1:] ehRec[f] = np.zeros_like(ebins)[1:] lphRec[f] = np.zeros_like(lpbins)[1:]", "fname+'.pdf',format='pdf', bbox_inches = 'tight') #write to a text file with", "in range(len(histObs)): # cdfObs.append(np.sum(histObs[:i])/np.sum(histObs)) # for f in filters: #", "rN = Nrec/Nall*Nmult raN = Nmult obN = Nobs/Nall*Nmult fiN", "100, 0.2, dtype='float') #blanks for the histograms #All m1hAll =", "scenario (OC/GC, baseline/colossus, crowding/no crowding) peccAll.to_csv('./pecc/all-M67BN-ecc-p.csv', header = ['e', 'p'])", "plt.subplots(subplot_kw={'projection': \"mollweide\"}, figsize=(8,5)) ax.grid(True) #ax.set_xlabel(r\"$l$\",fontsize=16) #ax.set_ylabel(r\"$b$\",fontsize=16) #mlw = ax.scatter(lGal.ravel()*np.pi/180., bGal.ravel()*np.pi/180.,", "lpbins = np.arange(-2, 10, 0.5, dtype='float') dbins = np.arange(0, 40,", "#cdfAll = [] #cdfObs = [] #cdfRec = dict() #for", "m1hAll0, m1b = np.histogram(data[\"m1\"], bins=mbins) qhAll0, qb = np.histogram(data[\"m2\"]/data[\"m1\"], bins=qbins)", "'p']) #plot and save the histograms saveHist(m1hAll, m1hObs, m1hRec, m1b,", "= coords.ra.wrap_at(180.*units.degree).degree Decwrap = coords.dec.wrap_at(180.*units.degree).degree f, ax = plt.subplots(subplot_kw={'projection': \"mollweide\"},", "binary fraction fit fbFit= fitRagfb() print(fbFit) #to normalize intAll, err", "ax.scatter(np.array(RAwrap).ravel()*np.pi/180., np.array(Decwrap).ravel()*np.pi/180., c=np.array(recFrac)*100., cmap='viridis_r', s = 4) cbar = f.colorbar(mlw,", "np.histogram(obs[\"m2\"]/obs[\"m1\"], bins=qbins) ehObs0, eb = np.histogram(obs[\"e\"], bins=ebins) lphObs0, lpb =", "for f in filters: outline += ','+f+'histRec' outline += '\\n'", "#for f in filters: # lw = 1 # if", "np.arange(0, 1.05, 0.05, dtype='float') lpbins = np.arange(-2, 10, 0.5, dtype='float')", "#account for the binary fraction, as a function of mass", "magb, 'mag', 'EBLSST_maghist') saveHist(rhAll, rhObs, rhRec, rb, 'r2/r1', 'EBLSST_rhist') #make", "'EBLSST_rhist') #make the mollweide coords = SkyCoord(RA, Dec, unit=(units.degree, 
units.degree),frame='icrs')", "header = ['e', 'p']) #plot and save the histograms saveHist(m1hAll,", "np.zeros_like(magbins)[1:] rhAll = np.zeros_like(rbins)[1:] #Observable m1hObs = np.zeros_like(mbins)[1:] qhObs =", "in filters: m1hRec[f] = np.zeros_like(mbins)[1:] qhRec[f] = np.zeros_like(qbins)[1:] ehRec[f] =", "filters: histRec[f] = np.insert(histRec[f],0,0) #PDF ax1.step(bin_edges, histAll/np.sum(histAll), color=c1) ax1.step(bin_edges, histObs/np.sum(histObs),", "[days])', 'EBLSST_lphist') saveHist(dhAll, dhObs, dhRec, db, 'd (kpc)', 'EBLSST_dhist') saveHist(maghAll,", "np.insert(histRec[f],0,0) #PDF ax1.step(bin_edges, histAll/np.sum(histAll), color=c1) ax1.step(bin_edges, histObs/np.sum(histObs), color=c2) for f", "Pcut) | (twiceP < Pcut))] Nrec = len(rec.index) #I'd like", "fitRagfb() print(fbFit) #to normalize intAll, err = quad(RagNormal, -20, 20)", "cdfAll, color=c1) #ax2.step(bin_edges, cdfObs, color=c2) #for f in filters: #", "Phs = np.pi*constants.G/np.sqrt(2.)*(m1*m2/m3)**(3./2.)*(m1 + m2)**(-0.5)*sigma**(-3.) return Phs.decompose().to(units.day) #similar to field,", "19.5) & (data['appMagMean_r'] > 15.8) & (data['p'] < 1000) &", "m1b = np.histogram(obs[\"m1\"], bins=mbins) qhObs0, qb = np.histogram(obs[\"m2\"]/obs[\"m1\"], bins=qbins) ehObs0,", "== \"__main__\": filters = ['u_', 'g_', 'r_', 'i_', 'z_', 'y_',", "the histograms d = \"./input_files/\" files = os.listdir(d) IDs =", "# if (f == 'all'): # lw = 0.5 #", "'EBLSST_ehist') saveHist(lphAll, lphObs, lphRec, lpb, 'log(P [days])', 'EBLSST_lphist') saveHist(dhAll, dhObs,", "Nall = len(data.index)/intNorm ###is this correct? (and the only place", "######################### # Baseline M67 long script -- NO crowding #", "np.array(Decwrap).ravel()*np.pi/180., c=np.array(recFrac)*100., cmap='viridis_r', s = 4) cbar = f.colorbar(mlw, shrink=0.7)", "models.PowerLaw1D(amplitude=0.5, x_0=1, alpha=-1.) fitter = fitting.LevMarLSQFitter() fit = fitter(init, x,", "plt.subplots() fqrat, axqrat = plt.subplots() fecc, axecc = plt.subplots() flper,", "qhRec, qb, 'q (m2/m1)', 'EBLSST_qhist') saveHist(ehAll, ehObs, ehRec, eb, 'e',", "alpha=0.1) axrad.step(rb[0:-1], rhAll0/np.sum(rhAll0), color='black', alpha=0.1) #account for the binary fraction,", "of script import pandas as pd import numpy as np", "saveHist(histAll, histObs, histRec, bin_edges, xtitle, fname, filters = ['u_', 'g_',", "NobsPrsa = 0. NrecPrsa = 0. 
Nall = len(data.index)/intNorm ###is", "saveHist(qhAll, qhObs, qhRec, qb, 'q (m2/m1)', 'EBLSST_qhist') saveHist(ehAll, ehObs, ehRec,", "all the p/ecc values to our dataframes # All dataframe", "25, 1, dtype='float') rbins = np.arange(0, 100, 0.2, dtype='float') #blanks", "1 def getPhs(sigma, m1=1*units.solMass, m2=1*units.solMass, m3=0.5*units.solMass): Phs = np.pi*constants.G/np.sqrt(2.)*(m1*m2/m3)**(3./2.)*(m1 +", "4): #read in the header header = pd.read_csv(d+f, nrows=1) ######################", "fecc, axecc = plt.subplots() flper, axlper = plt.subplots() fdist, axdist", "color='black', alpha=0.1) axecc.step(eb[0:-1], ehAll0/np.sum(ehAll0), color='black', alpha=0.1) axlper.step(lpb[0:-1], lphAll0/np.sum(lphAll0), color='black', alpha=0.1)", "= np.sum(m1hAll0/len(data.index)*fbFit(m1val)) #account for the hard-soft boundary Phs = getPhs(header['clusterVdisp'].iloc[0]*units.km/units.s).to(units.day).value", "& (data['appMagMean_r'] > 15.8) & (data['p'] < 1000) & (data['p']", "######################### # Need to account for limit in input period", "# Appending for Andrew eccAll.append(prsa['e'].values) pAll.append(prsa['p'].values) NallPrsa = len(prsa.index) if", "'all'] #get the Raghavan binary fraction fit fbFit= fitRagfb() print(fbFit)", "= ['e', 'p']) peccRec.to_csv('./pecc/rec-M67BN-ecc-p.csv', header = ['e', 'p']) #plot and", "0.1, 1, 8, 15] #estimates of midpoints in bins, and", "np.zeros_like(qbins)[1:] ehObs = np.zeros_like(ebins)[1:] lphObs = np.zeros_like(lpbins)[1:] dhObs = np.zeros_like(dbins)[1:]", "open(fname) as f: for i, l in enumerate(f): pass return", "coords.ra.wrap_at(180.*units.degree).degree Decwrap = coords.dec.wrap_at(180.*units.degree).degree f, ax = plt.subplots(subplot_kw={'projection': \"mollweide\"}, figsize=(8,5))", "cmap='viridis_r', s = 4) ax.set_xlabel(\"RA\",fontsize=16) ax.set_ylabel(\"Dec\",fontsize=16) mlw = ax.scatter(np.array(RAwrap).ravel()*np.pi/180., np.array(Decwrap).ravel()*np.pi/180.,", "using this: https://sites.uni.edu/morgans/astro/course/Notes/section2/spectralmasses.html y = [0.20, 0.35, 0.50, 0.70, 0.75]", "(and the only place I need to normalize?) prsa =", "db = np.histogram(obs[\"d\"], bins=dbins) maghObs0, magb = np.histogram(obs[\"appMagMean_r\"], bins=magbins) rhObs0,", "= np.insert(histRec[f],0,0) #PDF ax1.step(bin_edges, histAll/np.sum(histAll), color=c1) ax1.step(bin_edges, histObs/np.sum(histObs), color=c2) for", "letter code corresponds to scenario (OC/GC, baseline/colossus, crowding/no crowding) peccAll.to_csv('./pecc/all-M67BN-ecc-p.csv',", "= 0. Nobs = 0. raN = 0. 
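
# Added note (not in the original pipeline): a minimal usage sketch for getPhs.
# For an assumed cluster velocity dispersion of, e.g., 5 km/s,
#   print(getPhs(5.*units.km/units.s))
# returns the hard-soft boundary period as an astropy Quantity in days;
# binaries with periods shorter than this are the "hard" binaries expected to
# survive dynamical encounters in the cluster.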

#similar to field, but limiting by the hard-soft boundary
def fitRagfb():
    x = [0.05, 0.1, 1, 8, 15]  #estimates of midpoints in bins, and using this: https://sites.uni.edu/morgans/astro/course/Notes/section2/spectralmasses.html
    y = [0.20, 0.35, 0.50, 0.70, 0.75]
    init = models.PowerLaw1D(amplitude=0.5, x_0=1, alpha=-1.)
    fitter = fitting.LevMarLSQFitter()
    fit = fitter(init, x, y)

    return fit


def RagNormal(x, cdf = False):
    mean = 5.03
    std = 2.28
    if (cdf):
        return scipy.stats.norm.cdf(x, mean, std)

    return scipy.stats.norm.pdf(x, mean, std)

def saveHist(histAll, histObs, histRec, bin_edges, xtitle, fname, filters = ['u_', 'g_', 'r_', 'i_', 'z_', 'y_', 'all']):
    c1 = '#5687A6'  #Dali Blue (Andrew's AAS Poster)
    c2 = '#A62B1F'  #Dali Red
    c3 = '#BF8A26'  #Dali Beige

    fig, ax1 = plt.subplots(figsize=(8,6), sharex=True)  #can change to include cdf with ax1, ax2

    histAll = np.insert(histAll, 0, 0)
    histObs = np.insert(histObs, 0, 0)
    for f in filters:
        histRec[f] = np.insert(histRec[f], 0, 0)

    #PDF
    ax1.step(bin_edges, histAll/np.sum(histAll), color=c1)
    ax1.step(bin_edges, histObs/np.sum(histObs), color=c2)
    for f in filters:
        lw = 1
        if (f == 'all'):
            lw = 0.5
        ax1.step(bin_edges, histRec[f]/np.sum(histRec[f]), color=c3, linewidth=lw)
    ax1.set_ylabel('PDF')
    ax1.set_yscale('log')
    ax1.set_title('Globular Clusters - Baseline', fontsize = 16)
    ax1.set_xlabel(xtitle)

    #CDF (currently unused; would need a second axis, ax2)
    #cdfAll = []
    #cdfObs = []
    #cdfRec = dict()
    #for f in filters:
    #    cdfRec[f] = []
    #for i in range(len(histAll)):
    #    cdfAll.append(np.sum(histAll[:i])/np.sum(histAll))
    #for i in range(len(histObs)):
    #    cdfObs.append(np.sum(histObs[:i])/np.sum(histObs))
    #for f in filters:
    #    for i in range(len(histRec[f])):
    #        cdfRec[f].append(np.sum(histRec[f][:i])/np.sum(histRec[f]))
    #ax2.step(bin_edges, cdfAll, color=c1)
    #ax2.step(bin_edges, cdfObs, color=c2)
    #for f in filters:
    #    lw = 1
    #    if (f == 'all'):
    #        lw = 0.5
    #    ax2.step(bin_edges, cdfRec[f], color=c3, linewidth=lw)
    #ax2.set_ylabel('CDF')
    #ax2.set_xlabel(xtitle)

    fig.subplots_adjust(hspace=0)
    fig.savefig('./plots/' + fname + '.pdf', format='pdf', bbox_inches = 'tight')

    #write to a text file
    with open('./eblsst_files/' + fname + '.csv', 'w') as fl:
        outline = 'binEdges,histAll,histObs'
        for f in filters:
            outline += ','+f+'histRec'
        outline += '\n'
        fl.write(outline)
        for i in range(len(bin_edges)):
            outline = str(bin_edges[i])+','+str(histAll[i])+','+str(histObs[i])
            for f in filters:
                outline += ','+str(histRec[f][i])
            outline += '\n'
            fl.write(outline)
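
# Added note on the CSV written by saveHist(): one row per bin edge, with
# columns binEdges, histAll, histObs, plus one <filter>histRec column per
# filter, e.g. (illustrative values only):
#   binEdges,histAll,histObs,u_histRec,g_histRec,r_histRec,i_histRec,z_histRec,y_histRec,allhistRec
#   0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0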


if __name__ == "__main__":

    filters = ['u_', 'g_', 'r_', 'i_', 'z_', 'y_', 'all']

    #get the Raghavan binary fraction fit
    fbFit = fitRagfb()
    print(fbFit)

    #to normalize
    intAll, err = quad(RagNormal, -20, 20)
    intCut, err = quad(RagNormal, -20, np.log10(365*10.))
    intNorm = intCut/intAll

    #cutoff in percent error for "recovered"
    Pcut = 0.1
    #assumed mean stellar mass
    mMean = 0.5
    #minimum number of lines to consider in file
    Nlim = 3
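
    # Added note: intNorm is the fraction of the Raghavan log-normal period
    # distribution with P < 10 yr (log10(3650 d) ~ 3.56), roughly 0.26 for
    # mean = 5.03 and sigma = 2.28. Dividing the number of simulated binaries
    # by intNorm below presumably scales each period-truncated input file back
    # up to the full period distribution (see the "limit in input period" note).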

    if (doIndividualPlots):
        fmass, axmass = plt.subplots()
        fqrat, axqrat = plt.subplots()
        fecc, axecc = plt.subplots()
        flper, axlper = plt.subplots()
        fdist, axdist = plt.subplots()
        fmag, axmag = plt.subplots()
        frad, axrad = plt.subplots()

    #bins for all the histograms
    Nbins = 25
    mbins = np.arange(0, 10, 0.1, dtype='float')
    qbins = np.arange(0, 1, 0.1, dtype='float')
    ebins = np.arange(0, 1.05, 0.05, dtype='float')
    lpbins = np.arange(-2, 10, 0.5, dtype='float')
    dbins = np.arange(0, 40, 1, dtype='float')
    magbins = np.arange(11, 25, 1, dtype='float')
    rbins = np.arange(0, 100, 0.2, dtype='float')

    #blanks for the histograms
    #All
    m1hAll = np.zeros_like(mbins)[1:]
    qhAll = np.zeros_like(qbins)[1:]
    ehAll = np.zeros_like(ebins)[1:]
    lphAll = np.zeros_like(lpbins)[1:]
    dhAll = np.zeros_like(dbins)[1:]
    maghAll = np.zeros_like(magbins)[1:]
    rhAll = np.zeros_like(rbins)[1:]
    #Observable
    m1hObs = np.zeros_like(mbins)[1:]
    qhObs = np.zeros_like(qbins)[1:]
    ehObs = np.zeros_like(ebins)[1:]
    lphObs = np.zeros_like(lpbins)[1:]
    dhObs = np.zeros_like(dbins)[1:]
    maghObs = np.zeros_like(magbins)[1:]
    rhObs = np.zeros_like(rbins)[1:]
    #Recovered
    m1hRec = dict()
    qhRec = dict()
    ehRec = dict()
    lphRec = dict()
    dhRec = dict()
    maghRec = dict()
    rhRec = dict()
    for f in filters:
        m1hRec[f] = np.zeros_like(mbins)[1:]
        qhRec[f] = np.zeros_like(qbins)[1:]
        ehRec[f] = np.zeros_like(ebins)[1:]
        lphRec[f] = np.zeros_like(lpbins)[1:]
        dhRec[f] = np.zeros_like(dbins)[1:]
        maghRec[f] = np.zeros_like(magbins)[1:]
        rhRec[f] = np.zeros_like(rbins)[1:]

    RA = []
    Dec = []
    recFrac = []
    recN = []
    rawN = []
    obsN = []
    fileN = []
    fileObsN = []
    fileRecN = []
    allNPrsa = []
    obsNPrsa = []
    recNPrsa = []

    # Lists for period and eccentricity for Andrew's circularization plots
    eccAll = []
    eccObs = []
    eccRec = []
    pAll = []
    pObs = []
    pRec = []
    # Using prsa dataframes for these lists because of the period cutoff at 1000 days

    # Dataframes to write to files later; 3 files for each sub-population - append everything to these
    peccAll = pd.DataFrame(columns = ['e', 'p'])
    peccObs = pd.DataFrame(columns = ['e', 'p'])
    peccRec = pd.DataFrame(columns = ['e', 'p'])

    #Read in all the data and make the histograms
    d = "./input_files/"
    files = os.listdir(d)
    IDs = []
    for i, f in enumerate(files):
        print(round(i/len(files), 4), f)
        fl = file_len(d+f)
        if (fl >= 4):
            #read in the header
            header = pd.read_csv(d+f, nrows=1)

            ######################
            #NEED TO ACCOUNT FOR THE BINARY FRACTION when combining histograms
            #####################
            Nmult = header['clusterMass'][0]/mMean
            #Nmult = 1.

            RA.append(header['OpSimRA'])
            Dec.append(header['OpSimDec'])

            #read in rest of the file
            data = pd.read_csv(d+f, header = 2).fillna(-999)
            rF = 0.
            rN = 0.
            Nrec = 0.
            Nobs = 0.
            raN = 0.
            obN = 0.
            fiN = 0.
            fioN = 0.
            firN = 0.
            NallPrsa = 0.
            NobsPrsa = 0.
            NrecPrsa = 0.
            Nall = len(data.index)/intNorm  ###is this correct? (and the only place I need to normalize?)

            prsa = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] > 15.8) & (data['p'] < 1000) & (data['p'] > 0.5)]
            # Appending for Andrew
            eccAll.append(prsa['e'].values)
            pAll.append(prsa['p'].values)

            NallPrsa = len(prsa.index)

            if (Nall >= Nlim):
                #create histograms
                #All
                m1hAll0, m1b = np.histogram(data["m1"], bins=mbins)
                qhAll0, qb = np.histogram(data["m2"]/data["m1"], bins=qbins)
                ehAll0, eb = np.histogram(data["e"], bins=ebins)
                lphAll0, lpb = np.histogram(np.ma.log10(data["p"].values).filled(-999), bins=lpbins)
                dhAll0, db = np.histogram(data["d"], bins=dbins)
                maghAll0, magb = np.histogram(data["appMagMean_r"], bins=magbins)
                rhAll0, rb = np.histogram(data["r2"]/data["r1"], bins=rbins)

                if (doIndividualPlots):
                    axmass.step(m1b[0:-1], m1hAll0/np.sum(m1hAll0), color='black', alpha=0.1)
                    axqrat.step(qb[0:-1], qhAll0/np.sum(qhAll0), color='black', alpha=0.1)
                    axecc.step(eb[0:-1], ehAll0/np.sum(ehAll0), color='black', alpha=0.1)
                    axlper.step(lpb[0:-1], lphAll0/np.sum(lphAll0), color='black', alpha=0.1)
                    axdist.step(db[0:-1], dhAll0/np.sum(dhAll0), color='black', alpha=0.1)
                    axmag.step(magb[0:-1], maghAll0/np.sum(maghAll0), color='black', alpha=0.1)
                    axrad.step(rb[0:-1], rhAll0/np.sum(rhAll0), color='black', alpha=0.1)

                #account for the binary fraction, as a function of mass
                dm1 = np.diff(m1b)
                m1val = m1b[:-1] + dm1/2.
                fb = np.sum(m1hAll0/len(data.index)*fbFit(m1val))
                #account for the hard-soft boundary
                Phs = getPhs(header['clusterVdisp'].iloc[0]*units.km/units.s).to(units.day).value
                fb *= RagNormal(np.log10(Phs), cdf = True)
                print("fb, Phs = ", fb, Phs)
                Nmult *= fb
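
                # Added note: RagNormal(np.log10(Phs), cdf=True) is the fraction
                # of the log-normal period distribution with P < Phs, i.e. the
                # binaries on the "hard" side of the hard-soft boundary that are
                # expected to survive in the cluster; fb (and hence Nmult) is
                # reduced by that factor before the histograms are co-added.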

                m1hAll += m1hAll0/Nall*Nmult
                qhAll += qhAll0/Nall*Nmult
                ehAll += ehAll0/Nall*Nmult
                lphAll += lphAll0/Nall*Nmult
                dhAll += dhAll0/Nall*Nmult
                maghAll += maghAll0/Nall*Nmult
                rhAll += rhAll0/Nall*Nmult

                #Obs
                obs = data.loc[data['LSM_PERIOD'] != -999]
                Nobs = len(obs.index)
                prsaObs = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] > 15.8) & (data['p'] < 1000) & (data['p'] > 0.5) & (data['LSM_PERIOD'] != -999)]
                NobsPrsa = len(prsaObs.index)
                # Appending for Andrew's files
                eccObs.append(prsaObs['e'].values)
                pObs.append(prsaObs['p'].values)

                if (Nobs >= Nlim):
                    m1hObs0, m1b = np.histogram(obs["m1"], bins=mbins)
                    qhObs0, qb = np.histogram(obs["m2"]/obs["m1"], bins=qbins)
                    ehObs0, eb = np.histogram(obs["e"], bins=ebins)
                    lphObs0, lpb = np.histogram(np.ma.log10(obs["p"].values).filled(-999), bins=lpbins)
                    dhObs0, db = np.histogram(obs["d"], bins=dbins)
                    maghObs0, magb = np.histogram(obs["appMagMean_r"], bins=magbins)
                    rhObs0, rb = np.histogram(obs["r2"]/obs["r1"], bins=rbins)
                    m1hObs += m1hObs0/Nall*Nmult
                    qhObs += qhObs0/Nall*Nmult
                    ehObs += ehObs0/Nall*Nmult
                    lphObs += lphObs0/Nall*Nmult
                    dhObs += dhObs0/Nall*Nmult
                    maghObs += maghObs0/Nall*Nmult
                    rhObs += rhObs0/Nall*Nmult

                    #Rec
                    recCombined = pd.DataFrame()
                    prsaRecCombined = pd.DataFrame()
                    for filt in filters:
                        key = filt+'LSS_PERIOD'
                        if (filt == 'all'):
                            key = 'LSM_PERIOD'
                        fullP = abs(data[key] - data['p'])/data['p']
                        halfP = abs(data[key] - 0.5*data['p'])/(0.5*data['p'])
                        twiceP = abs(data[key] - 2.*data['p'])/(2.*data['p'])
                        rec = data.loc[(data[key] != -999) & ( (fullP < Pcut) | (halfP < Pcut) | (twiceP < Pcut) )]
                        prsaRec = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] > 15.8) & (data['p'] < 1000) & (data['p'] > 0.5) & (data['LSM_PERIOD'] != -999) & ( (fullP < Pcut) | (halfP < Pcut) | (twiceP < Pcut) )]
                        Nrec = len(rec.index)
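
                        # Added worked example (illustrative numbers): for a true
                        # period p = 10 d and a recovered LSS_PERIOD of 5.02 d,
                        # halfP = |5.02 - 5.0|/5.0 = 0.004 < Pcut = 0.1, so the
                        # system counts as recovered via the half-period alias
                        # even though fullP = |5.02 - 10|/10 ~ 0.5 fails the cut.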

                        #I'd like to account for all filters here to have more accurate numbers
                        # NOTE (added): DataFrame.append was removed in pandas 2.0;
                        # on newer pandas use pd.concat([recCombined, rec]) instead.
                        recCombined = recCombined.append(rec)
                        prsaRecCombined = prsaRecCombined.append(prsaRec)

                        # Going to use prsaRecCombined for ecc-p plots to account for all filters
                        eccRec.append(prsaRec['e'].values)
                        pRec.append(prsaRec['p'].values)

                        if (filt == 'all'):
                            recCombined.drop_duplicates(inplace=True)
                            prsaRecCombined.drop_duplicates(inplace=True)

                        if (Nrec >= Nlim):
                            m1hRec0, m1b = np.histogram(rec["m1"], bins=mbins)
                            qhRec0, qb = np.histogram(rec["m2"]/rec["m1"], bins=qbins)
                            ehRec0, eb = np.histogram(rec["e"], bins=ebins)
                            lphRec0, lpb = np.histogram(np.ma.log10(rec["p"].values).filled(-999), bins=lpbins)
                            dhRec0, db = np.histogram(rec["d"], bins=dbins)
                            maghRec0, magb = np.histogram(rec["appMagMean_r"], bins=magbins)
                            rhRec0, rb = np.histogram(rec["r2"]/rec["r1"], bins=rbins)
                            m1hRec[filt] += m1hRec0/Nall*Nmult
                            qhRec[filt] += qhRec0/Nall*Nmult
                            ehRec[filt] += ehRec0/Nall*Nmult
                            lphRec[filt] += lphRec0/Nall*Nmult
                            dhRec[filt] += dhRec0/Nall*Nmult
                            maghRec[filt] += maghRec0/Nall*Nmult
                            rhRec[filt] += rhRec0/Nall*Nmult

                            #for the mollweide
                            if (filt == 'all'):
                                Nrec = len(recCombined.index)
                                rF = Nrec/Nall
                                rN = Nrec/Nall*Nmult
                                raN = Nmult
                                obN = Nobs/Nall*Nmult
                                fiN = Nall
                                fioN = Nobs
                                firN = Nrec

                                NrecPrsa = len(prsaRecCombined.index)
                                NrecPrsa = NrecPrsa/Nall*Nmult
                                NobsPrsa = NobsPrsa/Nall*Nmult
                                NallPrsa = NallPrsa/Nall*Nmult

            recFrac.append(rF)
            recN.append(rN)
            rawN.append(raN)
            obsN.append(obN)
            fileN.append(fiN)
            fileObsN.append(fioN)
            fileRecN.append(firN)
            allNPrsa.append(NallPrsa)
            obsNPrsa.append(NobsPrsa)
            recNPrsa.append(NrecPrsa)
            #print(np.sum(lphRec), np.sum(recN), np.sum(lphRec)/np.sum(recN), np.sum(lphRec0), Nrec, np.sum(lphRec0)/Nrec, np.sum(lphObs), np.sum(obsN), np.sum(lphObs)/np.sum(obsN))
def saveHist(histAll, histObs, histRec, bin_edges, xtitle, fname, filters = ['u_', 'g_', 'r_', 'i_', 'z_', 'y_', 'all']):
    c1 = '#5687A6'  # Dali Blue (Andrew's AAS Poster)
    c2 = '#A62B1F'  # Dali Red
    c3 = '#BF8A26'  # Dali Beige

    fig, ax1 = plt.subplots(figsize=(8,6), sharex=True)  # can change to include cdf with ax1, ax2

    histAll = np.insert(histAll, 0, 0)
    histObs = np.insert(histObs, 0, 0)
    for f in filters:
        histRec[f] = np.insert(histRec[f], 0, 0)

    # PDF
    ax1.step(bin_edges, histAll/np.sum(histAll), color=c1)
    ax1.step(bin_edges, histObs/np.sum(histObs), color=c2)
    for f in filters:
        lw = 1
        if (f == 'all'):
            lw = 0.5
        ax1.step(bin_edges, histRec[f]/np.sum(histRec[f]), color=c3, linewidth=lw)
    ax1.set_ylabel('PDF')
    ax1.set_yscale('log')
    ax1.set_title('Globular Clusters - Baseline', fontsize = 16)
    ax1.set_xlabel(xtitle)

    # CDF
    #cdfAll = []
    #cdfObs = []
    #cdfRec = dict()
    #for f in filters:
    #    cdfRec[f] = []
    # for i in range(len(histAll)):
    #     cdfAll.append(np.sum(histAll[:i])/np.sum(histAll))
    # for i in range(len(histObs)):
    #     cdfObs.append(np.sum(histObs[:i])/np.sum(histObs))
    # for f in filters:
    #     for i in range(len(histRec[f])):
    #         cdfRec[f].append(np.sum(histRec[f][:i])/np.sum(histRec[f]))
    #ax2.step(bin_edges, cdfAll, color=c1)
    #ax2.step(bin_edges, cdfObs, color=c2)
    #for f in filters:
    #    lw = 1
    #    if (f == 'all'):
    #        lw = 0.5
    #    ax2.step(bin_edges, cdfRec[f], color=c3, linewidth=lw)
    #ax2.set_ylabel('CDF')
    #ax2.set_xlabel(xtitle)

    fig.subplots_adjust(hspace=0)
    fig.savefig('./plots/' + fname+'.pdf', format='pdf', bbox_inches = 'tight')

    # write to a text file
    with open('./eblsst_files/' + fname+'.csv', 'w') as fl:
        outline = 'binEdges,histAll,histObs'
        for f in filters:
            outline += ','+f+'histRec'
        outline += '\n'
        fl.write(outline)
        for i in range(len(bin_edges)):
            outline = str(bin_edges[i])+','+str(histAll[i])+','+str(histObs[i])
            for f in filters:
                outline += ','+str(histRec[f][i])
            outline += '\n'
            fl.write(outline)
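# Minimal sketch of a saveHist call (hypothetical arrays, commented out so it
# never runs on import; the real calls with the accumulated histograms are at
# the bottom of __main__):
#   bins = np.arange(0, 10, 0.1, dtype='float')
#   hAll = np.zeros_like(bins)[1:]
#   hObs = np.zeros_like(bins)[1:]
#   hRec = {f: np.zeros_like(bins)[1:] for f in ['u_', 'g_', 'r_', 'i_', 'z_', 'y_', 'all']}
#   saveHist(hAll, hObs, hRec, bins, 'm1 (Msolar)', 'EBLSST_m1hist')
# histAll and histObs are per-bin arrays (one entry shorter than bin_edges) and
# histRec is a dict keyed by filter; saveHist prepends a leading zero to each so
# they align with bin_edges for the step plot and for the csv written to ./eblsst_files/.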
if __name__ == "__main__":

    filters = ['u_', 'g_', 'r_', 'i_', 'z_', 'y_', 'all']

    # get the Raghavan binary fraction fit
    fbFit = fitRagfb()
    print(fbFit)

    # to normalize
    intAll, err = quad(RagNormal, -20, 20)
    intCut, err = quad(RagNormal, -20, np.log10(365*10.))
    intNorm = intCut/intAll

    # cutoff in percent error for "recovered"
    Pcut = 0.1

    # assumed mean stellar mass
    mMean = 0.5

    # minimum number of lines to consider in file
    Nlim = 3

    if (doIndividualPlots):
        fmass, axmass = plt.subplots()
        fqrat, axqrat = plt.subplots()
        fecc, axecc = plt.subplots()
        flper, axlper = plt.subplots()
        fdist, axdist = plt.subplots()
        fmag, axmag = plt.subplots()
        frad, axrad = plt.subplots()

    # bins for all the histograms
    Nbins = 25
    mbins = np.arange(0, 10, 0.1, dtype='float')
    qbins = np.arange(0, 1, 0.1, dtype='float')
    ebins = np.arange(0, 1.05, 0.05, dtype='float')
    lpbins = np.arange(-2, 10, 0.5, dtype='float')
    dbins = np.arange(0, 40, 1, dtype='float')
    magbins = np.arange(11, 25, 1, dtype='float')
    rbins = np.arange(0, 100, 0.2, dtype='float')

    # blanks for the histograms
    # All
    m1hAll = np.zeros_like(mbins)[1:]
    qhAll = np.zeros_like(qbins)[1:]
    ehAll = np.zeros_like(ebins)[1:]
    lphAll = np.zeros_like(lpbins)[1:]
    dhAll = np.zeros_like(dbins)[1:]
    maghAll = np.zeros_like(magbins)[1:]
    rhAll = np.zeros_like(rbins)[1:]
    # Observable
    m1hObs = np.zeros_like(mbins)[1:]
    qhObs = np.zeros_like(qbins)[1:]
    ehObs = np.zeros_like(ebins)[1:]
    lphObs = np.zeros_like(lpbins)[1:]
    dhObs = np.zeros_like(dbins)[1:]
    maghObs = np.zeros_like(magbins)[1:]
    rhObs = np.zeros_like(rbins)[1:]
    # Recovered
    m1hRec = dict()
    qhRec = dict()
    ehRec = dict()
    lphRec = dict()
    dhRec = dict()
    maghRec = dict()
    rhRec = dict()
    for f in filters:
        m1hRec[f] = np.zeros_like(mbins)[1:]
        qhRec[f] = np.zeros_like(qbins)[1:]
        ehRec[f] = np.zeros_like(ebins)[1:]
        lphRec[f] = np.zeros_like(lpbins)[1:]
        dhRec[f] = np.zeros_like(dbins)[1:]
        maghRec[f] = np.zeros_like(magbins)[1:]
        rhRec[f] = np.zeros_like(rbins)[1:]

    RA = []
    Dec = []
    recFrac = []
    recN = []
    rawN = []
    obsN = []
    fileN = []
    fileObsN = []
    fileRecN = []
    allNPrsa = []
    obsNPrsa = []
    recNPrsa = []

    # Lists for period and eccentricity for Andrew's circularization plots
    eccAll = []
    eccObs = []
    eccRec = []
    pAll = []
    pObs = []
    pRec = []
    # Using prsa dataframes for these lists because of period cutoff at 1000 days

    # Dataframes to write to files later; 3 files for each sub-population - append everything to these
    peccAll = pd.DataFrame(columns = ['e', 'p'])
    peccObs = pd.DataFrame(columns = ['e', 'p'])
    peccRec = pd.DataFrame(columns = ['e', 'p'])

    # Read in all the data and make the histograms
    d = "./input_files/"
    files = os.listdir(d)
    IDs = []
    for i, f in enumerate(files):
        print(round(i/len(files), 4), f)
        fl = file_len(d+f)
        if (fl >= 4):
            # read in the header
            header = pd.read_csv(d+f, nrows=1)

            ######################
            # NEED TO ACCOUNT FOR THE BINARY FRACTION when combining histograms
            #####################
            Nmult = header['clusterMass'][0]/mMean
            #Nmult = 1.

            RA.append(header['OpSimRA'])
            Dec.append(header['OpSimDec'])

            # read in rest of the file
            data = pd.read_csv(d+f, header = 2).fillna(-999)
            rF = 0.
            rN = 0.
            Nrec = 0.
            Nobs = 0.
            raN = 0.
            obN = 0.
            fiN = 0.
            fioN = 0.
            firN = 0.
            NallPrsa = 0.
            NobsPrsa = 0.
            NrecPrsa = 0.
            Nall = len(data.index)/intNorm  ### is this correct? (and the only place I need to normalize?)

            prsa = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] > 15.8) & (data['p'] < 1000) & (data['p'] > 0.5)]

            # Appending for Andrew
            eccAll.append(prsa['e'].values)
            pAll.append(prsa['p'].values)

            NallPrsa = len(prsa.index)

            if (Nall >= Nlim):
                # create histograms
                # All
                m1hAll0, m1b = np.histogram(data["m1"], bins=mbins)
                qhAll0, qb = np.histogram(data["m2"]/data["m1"], bins=qbins)
                ehAll0, eb = np.histogram(data["e"], bins=ebins)
                lphAll0, lpb = np.histogram(np.ma.log10(data["p"].values).filled(-999), bins=lpbins)
                dhAll0, db = np.histogram(data["d"], bins=dbins)
                maghAll0, magb = np.histogram(data["appMagMean_r"], bins=magbins)
                rhAll0, rb = np.histogram(data["r2"]/data["r1"], bins=rbins)

                if (doIndividualPlots):
                    axmass.step(m1b[0:-1], m1hAll0/np.sum(m1hAll0), color='black', alpha=0.1)
                    axqrat.step(qb[0:-1], qhAll0/np.sum(qhAll0), color='black', alpha=0.1)
                    axecc.step(eb[0:-1], ehAll0/np.sum(ehAll0), color='black', alpha=0.1)
                    axlper.step(lpb[0:-1], lphAll0/np.sum(lphAll0), color='black', alpha=0.1)
                    axdist.step(db[0:-1], dhAll0/np.sum(dhAll0), color='black', alpha=0.1)
                    axmag.step(magb[0:-1], maghAll0/np.sum(maghAll0), color='black', alpha=0.1)
                    axrad.step(rb[0:-1], rhAll0/np.sum(rhAll0), color='black', alpha=0.1)

                # account for the binary fraction, as a function of mass
                dm1 = np.diff(m1b)
                m1val = m1b[:-1] + dm1/2.
                fb = np.sum(m1hAll0/len(data.index)*fbFit(m1val))
                # account for the hard-soft boundary
                Phs = getPhs(header['clusterVdisp'].iloc[0]*units.km/units.s).to(units.day).value
                fb *= RagNormal(np.log10(Phs), cdf = True)
                print("fb, Phs = ", fb, Phs)
                Nmult *= fb

                m1hAll += m1hAll0/Nall*Nmult
                qhAll += qhAll0/Nall*Nmult
                ehAll += ehAll0/Nall*Nmult
                lphAll += lphAll0/Nall*Nmult
                dhAll += dhAll0/Nall*Nmult
                maghAll += maghAll0/Nall*Nmult
                rhAll += rhAll0/Nall*Nmult
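                # Worked example of the weighting just applied (assumed numbers, for
                # orientation only): with clusterMass = 5e4 Msun and mMean = 0.5, Nmult
                # starts at 1e5 systems; if the mass-weighted binary fraction times the
                # hard-soft cut gives fb = 0.3, Nmult becomes 3e4. Each histogram is then
                # added as hist0/Nall*Nmult, i.e. this file's normalized distribution
                # rescaled to the number of binaries the cluster is taken to represent.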
                # Obs
                obs = data.loc[data['LSM_PERIOD'] != -999]
                Nobs = len(obs.index)
                prsaObs = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] > 15.8) & (data['p'] < 1000) & (data['p'] > 0.5) & (data['LSM_PERIOD'] != -999)]
                NobsPrsa = len(prsaObs.index)

                # Appending for Andrew's files
                eccObs.append(prsaObs['e'].values)
                pObs.append(prsaObs['p'].values)

                if (Nobs >= Nlim):
                    m1hObs0, m1b = np.histogram(obs["m1"], bins=mbins)
                    qhObs0, qb = np.histogram(obs["m2"]/obs["m1"], bins=qbins)
                    ehObs0, eb = np.histogram(obs["e"], bins=ebins)
                    lphObs0, lpb = np.histogram(np.ma.log10(obs["p"].values).filled(-999), bins=lpbins)
                    dhObs0, db = np.histogram(obs["d"], bins=dbins)
                    maghObs0, magb = np.histogram(obs["appMagMean_r"], bins=magbins)
                    rhObs0, rb = np.histogram(obs["r2"]/obs["r1"], bins=rbins)
                    m1hObs += m1hObs0/Nall*Nmult
                    qhObs += qhObs0/Nall*Nmult
                    ehObs += ehObs0/Nall*Nmult
                    lphObs += lphObs0/Nall*Nmult
                    dhObs += dhObs0/Nall*Nmult
                    maghObs += maghObs0/Nall*Nmult
                    rhObs += rhObs0/Nall*Nmult

                    # Rec
                    recCombined = pd.DataFrame()
                    prsaRecCombined = pd.DataFrame()
                    for filt in filters:
                        key = filt+'LSS_PERIOD'
                        if (filt == 'all'):
                            key = 'LSM_PERIOD'
                        fullP = abs(data[key] - data['p'])/data['p']
                        halfP = abs(data[key] - 0.5*data['p'])/(0.5*data['p'])
                        twiceP = abs(data[key] - 2.*data['p'])/(2.*data['p'])
                        rec = data.loc[(data[key] != -999) & ((fullP < Pcut) | (halfP < Pcut) | (twiceP < Pcut))]
                        Nrec = len(rec.index)

                        # I'd like to account for all filters here to have more accurate numbers
                        recCombined = recCombined.append(rec)

                        prsaRec = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] > 15.8) & (data['p'] < 1000) & (data['p'] > 0.5) & (data['LSM_PERIOD'] != -999) & ((fullP < Pcut) | (halfP < Pcut) | (twiceP < Pcut))]
                        prsaRecCombined = prsaRecCombined.append(prsaRec)

                        # Going to use prsaRecCombined for ecc-p plots to account for all filters
                        eccRec.append(prsaRec['e'].values)
                        pRec.append(prsaRec['p'].values)

                        if (filt == 'all'):
                            recCombined.drop_duplicates(inplace=True)
                            prsaRecCombined.drop_duplicates(inplace=True)

                        if (Nrec >= Nlim):
                            m1hRec0, m1b = np.histogram(rec["m1"], bins=mbins)
                            qhRec0, qb = np.histogram(rec["m2"]/rec["m1"], bins=qbins)
                            ehRec0, eb = np.histogram(rec["e"], bins=ebins)
                            lphRec0, lpb = np.histogram(np.ma.log10(rec["p"].values).filled(-999), bins=lpbins)
                            dhRec0, db = np.histogram(rec["d"], bins=dbins)
                            maghRec0, magb = np.histogram(rec["appMagMean_r"], bins=magbins)
                            rhRec0, rb = np.histogram(rec["r2"]/rec["r1"], bins=rbins)
                            m1hRec[filt] += m1hRec0/Nall*Nmult
                            qhRec[filt] += qhRec0/Nall*Nmult
                            ehRec[filt] += ehRec0/Nall*Nmult
                            lphRec[filt] += lphRec0/Nall*Nmult
                            dhRec[filt] += dhRec0/Nall*Nmult
                            maghRec[filt] += maghRec0/Nall*Nmult
                            rhRec[filt] += rhRec0/Nall*Nmult

                            # for the mollweide
                            if (filt == 'all'):
                                Nrec = len(recCombined.index)
                                rF = Nrec/Nall
                                rN = Nrec/Nall*Nmult
                                raN = Nmult
                                obN = Nobs/Nall*Nmult
                                fiN = Nall
                                fioN = Nobs
                                firN = Nrec

                                NrecPrsa = len(prsaRecCombined.index)
                                NrecPrsa = NrecPrsa/Nall*Nmult
                                NobsPrsa = NobsPrsa/Nall*Nmult
                                NallPrsa = NallPrsa/Nall*Nmult

            recFrac.append(rF)
            recN.append(rN)
            rawN.append(raN)
            obsN.append(obN)
            fileN.append(fiN)
            fileObsN.append(fioN)
            fileRecN.append(firN)
            allNPrsa.append(NallPrsa)
            obsNPrsa.append(NobsPrsa)
            recNPrsa.append(NrecPrsa)
            #print(np.sum(lphRec), np.sum(recN), np.sum(lphRec)/np.sum(recN), np.sum(lphRec0), Nrec, np.sum(lphRec0)/Nrec, np.sum(lphObs), np.sum(obsN), np.sum(lphObs)/np.sum(obsN))
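    # Illustrative note on the recovery criterion used in the loop above (values
    # are made up for the example): with Pcut = 0.1, a binary with true period
    # p = 10 d and a gatspy period of 5.2 d fails the full-period test
    # (|5.2 - 10|/10 = 0.48) but passes the half-period test (|5.2 - 5|/5 = 0.04),
    # so it still counts as recovered; the same tolerance is applied at p and 2p.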
    # Concatenating p and ecc lists
    eccAll = np.concatenate(eccAll)
    eccObs = np.concatenate(eccObs)
    eccRec = np.concatenate(eccRec)
    pAll = np.concatenate(pAll)
    pObs = np.concatenate(pObs)
    pRec = np.concatenate(pRec)

    # print('Ecc lists:', eccAll, eccObs, eccRec)
    # print('P lists:', pAll, pObs, pRec)

    # Appending lists with all the p/ecc values to our dataframes
    # All dataframe
    peccAll['e'] = eccAll
    peccAll['p'] = pAll
    # Observable dataframe
    peccObs['e'] = eccObs
    peccObs['p'] = pObs
    # Recovered dataframe
    peccRec['e'] = eccRec
    peccRec['p'] = pRec

    # print('Final Dataframes:', peccAll, peccObs, peccRec)
    # print(peccRec.columns)

    # 3 letter code corresponds to scenario (OC/GC, baseline/colossus, crowding/no crowding)
    peccAll.to_csv('./pecc/all-M67BN-ecc-p.csv', header = ['e', 'p'])
    peccObs.to_csv('./pecc/obs-M67BN-ecc-p.csv', header = ['e', 'p'])
    peccRec.to_csv('./pecc/rec-M67BN-ecc-p.csv', header = ['e', 'p'])

    # plot and save the histograms
    saveHist(m1hAll, m1hObs, m1hRec, m1b, 'm1 (Msolar)', 'EBLSST_m1hist')
    saveHist(qhAll, qhObs, qhRec, qb, 'q (m2/m1)', 'EBLSST_qhist')
    saveHist(ehAll, ehObs, ehRec, eb, 'e', 'EBLSST_ehist')
    saveHist(lphAll, lphObs, lphRec, lpb, 'log(P [days])', 'EBLSST_lphist')
    saveHist(dhAll, dhObs, dhRec, db, 'd (kpc)', 'EBLSST_dhist')
    saveHist(maghAll, maghObs, maghRec, magb, 'mag', 'EBLSST_maghist')
    saveHist(rhAll, rhObs, rhRec, rb, 'r2/r1', 'EBLSST_rhist')

    # make the mollweide
    coords = SkyCoord(RA, Dec, unit=(units.degree, units.degree), frame='icrs')
    lGal = coords.galactic.l.wrap_at(180.*units.degree).degree
    bGal = coords.galactic.b.wrap_at(180.*units.degree).degree
    RAwrap = coords.ra.wrap_at(180.*units.degree).degree
    Decwrap = coords.dec.wrap_at(180.*units.degree).degree

    f, ax = plt.subplots(subplot_kw={'projection': "mollweide"}, figsize=(8,5))
    ax.grid(True)
    #ax.set_xlabel(r"$l$",fontsize=16)
    #ax.set_ylabel(r"$b$",fontsize=16)
    #mlw = ax.scatter(lGal.ravel()*np.pi/180., bGal.ravel()*np.pi/180., c=np.log10(np.array(recFrac)*100.), cmap='viridis_r', s = 4)
    ax.set_xlabel("RA", fontsize=16)
    ax.set_ylabel("Dec", fontsize=16)
    mlw = ax.scatter(np.array(RAwrap).ravel()*np.pi/180., np.array(Decwrap).ravel()*np.pi/180., c=np.array(recFrac)*100., cmap='viridis_r', s = 4)
    cbar = f.colorbar(mlw, shrink=0.7)
    cbar.set_label(r'% recovered')
    f.savefig('./plots/' + 'mollweide_pct.pdf', format='pdf', bbox_inches = 'tight')

    f, ax = plt.subplots(subplot_kw={'projection': "mollweide"}, figsize=(8,5))
    ax.grid(True)
    #ax.set_xlabel(r"$l$",fontsize=16)
    #ax.set_ylabel(r"$b$",fontsize=16)
    #mlw = ax.scatter(lGal.ravel()*np.pi/180., bGal.ravel()*np.pi/180., c=np.log10(np.array(recN)), cmap='viridis_r', s = 4)
    ax.set_xlabel("RA", fontsize=16)
    ax.set_ylabel("Dec", fontsize=16)
    mlw = ax.scatter(np.array(RAwrap).ravel()*np.pi/180., np.array(Decwrap).ravel()*np.pi/180., c=np.log10(np.array(recN)), cmap='viridis_r', s = 4)
    cbar = f.colorbar(mlw, shrink=0.7)
    cbar.set_label(r'log10(N) recovered')
    f.savefig('./plots/' + 'mollweide_N.pdf', format='pdf', bbox_inches = 'tight')

    if (doIndividualPlots):
        fmass.savefig('./plots/' + 'massPDFall.pdf', format='pdf', bbox_inches = 'tight')
        fqrat.savefig('./plots/' + 'qPDFall.pdf', format='pdf', bbox_inches = 'tight')
        fecc.savefig('./plots/' + 'eccPDFall.pdf', format='pdf', bbox_inches = 'tight')
        flper.savefig('./plots/' + 'lperPDFall.pdf', format='pdf', bbox_inches = 'tight')
        fdist.savefig('./plots/' + 'distPDFall.pdf', format='pdf', bbox_inches = 'tight')
        fmag.savefig('./plots/' + 'magPDFall.pdf', format='pdf', bbox_inches = 'tight')
        frad.savefig('./plots/' + 'radPDFall.pdf', format='pdf', bbox_inches = 'tight')

    print("###################")
    print("number of binaries in input files (raw, log):", np.sum(fileN), np.log10(np.sum(fileN)))
    print("number of binaries in tested with gatspy (raw, log):", np.sum(fileObsN), np.log10(np.sum(fileObsN)))
    print("number of binaries in recovered with gatspy (raw, log):", np.sum(fileRecN), np.log10(np.sum(fileRecN)))
    print("recovered/observable*100 with gatspy:", np.sum(fileRecN)/np.sum(fileObsN)*100.)
    print("###################")
    print("total in sample (raw, log):", np.sum(rawN), np.log10(np.sum(rawN)))
    print("total observable (raw, log):", np.sum(obsN), np.log10(np.sum(obsN)))
    print("total recovered (raw, log):", np.sum(recN), np.log10(np.sum(recN)))
    print("recovered/observable*100:", np.sum(recN)/np.sum(obsN)*100.)
    print("###################")
    print("total in Prsa 15.8<r<19.5 P<1000d sample (raw, log):", np.sum(allNPrsa), np.log10(np.sum(allNPrsa)))
    print("total observable in Prsa 15.8<r<19.5 P<1000d sample (raw, log):", np.sum(obsNPrsa), np.log10(np.sum(obsNPrsa)))
    print("total recovered in Prsa 15.8<r<19.5 P<1000d sample (raw, log):", np.sum(recNPrsa), np.log10(np.sum(recNPrsa)))
    print("Prsa 15.8<r<19.5 P<1000d rec/obs*100:", np.sum(recNPrsa)/np.sum(obsNPrsa)*100.)
Andrew", "[] Dec = [] recFrac = [] recN = []", "tested with gatspy (raw, log):\",np.sum(fileObsN), np.log10(np.sum(fileObsN))) print(\"number of binaries in", "dhRec0, db = np.histogram(rec[\"d\"], bins=dbins) maghRec0, magb = np.histogram(rec[\"appMagMean_r\"], bins=magbins)", "if __name__ == \"__main__\": filters = ['u_', 'g_', 'r_', 'i_',", "& (data['appMagMean_r'] >15.8) & (data['p'] < 1000) & (data['p'] >0.5)", "= plt.subplots() fecc, axecc = plt.subplots() flper, axlper = plt.subplots()", "= np.arange(0, 100, 0.2, dtype='float') #blanks for the histograms #All", "np.arange(0, 100, 0.2, dtype='float') #blanks for the histograms #All m1hAll", "= 0. NobsPrsa = 0. NrecPrsa = 0. Nall =", "= ['u_', 'g_', 'r_', 'i_', 'z_', 'y_', 'all'] #get the", "[] obsNPrsa = [] recNPrsa = [] # Lists for", "NallPrsa = 0. NobsPrsa = 0. NrecPrsa = 0. Nall", "bins=rbins) if (doIndividualPlots): axmass.step(m1b[0:-1], m1hAll0/np.sum(m1hAll0), color='black', alpha=0.1) axqrat.step(qb[0:-1], qhAll0/np.sum(qhAll0), color='black',", "flper.savefig('./plots/' + 'lperPDFall.pdf',format='pdf', bbox_inches = 'tight') fdist.savefig('./plots/' + 'distPDFall.pdf',format='pdf', bbox_inches", "25 mbins = np.arange(0,10, 0.1, dtype='float') qbins = np.arange(0,1, 0.1,", "Andrew's circularization plots eccAll = [] eccObs = [] eccRec", "stellar mass mMean = 0.5 #minimum number of lines to", "data.loc[(data[key] != -999) & ( (fullP < Pcut) | (halfP", "eccObs = np.concatenate(eccObs) eccRec = np.concatenate(eccRec) pAll = np.concatenate(pAll) pObs", "maghObs += maghObs0/Nall*Nmult rhObs += rhObs0/Nall*Nmult #Rec recCombined = pd.DataFrame()", "(raw, log):\",np.sum(recN), np.log10(np.sum(recN))) print(\"recovered/observable*100:\",np.sum(recN)/np.sum(obsN)*100.) print(\"###################\") print(\"total in Prsa 15.8<r<19.5 P<1000d", "in filters: key = filt+'LSS_PERIOD' if (filt == 'all'): key", "np.histogram(rec[\"m2\"]/rec[\"m1\"], bins=qbins) ehRec0, eb = np.histogram(rec[\"e\"], bins=ebins) lphRec0, lpb =", "','+f+'histRec' outline += '\\n' fl.write(outline) for i in range(len(bin_edges)): outline", "save the histograms saveHist(m1hAll, m1hObs, m1hRec, m1b, 'm1 (Msolar)', 'EBLSST_m1hist')", "= 0. Nall = len(data.index)/intNorm ###is this correct? (and the", "m3=0.5*units.solMass): Phs = np.pi*constants.G/np.sqrt(2.)*(m1*m2/m3)**(3./2.)*(m1 + m2)**(-0.5)*sigma**(-3.) return Phs.decompose().to(units.day) #similar to", "# lw = 0.5 # ax2.step(bin_edges, cdfRec[f], color=c3, linewidth=lw) #ax2.set_ylabel('CDF')", "fb *= RagNormal(np.log10(Phs), cdf = True) print(\"fb, Phs = \",", "= 0. fiN = 0. fioN = 0. 
firN =", "f.colorbar(mlw, shrink=0.7) cbar.set_label(r'% recovered') f.savefig('./plots/' + 'mollweide_pct.pdf',format='pdf', bbox_inches = 'tight')", "of period cutoff at 1000 days # Dataframes to write", "input period ######################### ######################### # Baseline M67 long script --", "len(prsa.index) if (Nall >= Nlim): #create histograms #All m1hAll0, m1b", "= True from matplotlib import pyplot as plt def file_len(fname):", "qhRec[filt] += qhRec0/Nall*Nmult ehRec[filt] += ehRec0/Nall*Nmult lphRec[filt] += lphRec0/Nall*Nmult dhRec[filt]", "bins=dbins) maghAll0, magb = np.histogram(data[\"appMagMean_r\"], bins=magbins) rhAll0, rb = np.histogram(data[\"r2\"]/data[\"r1\"],", "eccAll peccAll['p'] = pAll # Observable dataframe peccObs['e'] = eccObs", "file data = pd.read_csv(d+f, header = 2).fillna(-999) rF = 0.", "(filt == 'all'): recCombined.drop_duplicates(inplace=True) prsaRecCombined.drop_duplicates(inplace=True) if (Nrec >= Nlim): m1hRec0,", "pass return i + 1 def getPhs(sigma, m1=1*units.solMass, m2=1*units.solMass, m3=0.5*units.solMass):", "lphObs0/Nall*Nmult dhObs += dhObs0/Nall*Nmult maghObs += maghObs0/Nall*Nmult rhObs += rhObs0/Nall*Nmult", "0. fiN = 0. fioN = 0. firN = 0.", "= len(prsa.index) if (Nall >= Nlim): #create histograms #All m1hAll0,", "& (data['p'] < 1000) & (data['p'] > 0.5)] # Appending", "saveHist(dhAll, dhObs, dhRec, db, 'd (kpc)', 'EBLSST_dhist') saveHist(maghAll, maghObs, maghRec,", "lpb = np.histogram(np.ma.log10(rec[\"p\"].values).filled(-999), bins=lpbins) dhRec0, db = np.histogram(rec[\"d\"], bins=dbins) maghRec0,", "['e', 'p']) peccObs.to_csv('./pecc/obs-M67BN-ecc-p.csv', header = ['e', 'p']) peccRec.to_csv('./pecc/rec-M67BN-ecc-p.csv', header =", "RagNormal(x, cdf = False): mean = 5.03 std = 2.28", "np.sum(m1hAll0/len(data.index)*fbFit(m1val)) #account for the hard-soft boundary Phs = getPhs(header['clusterVdisp'].iloc[0]*units.km/units.s).to(units.day).value fb", "Observable dataframe peccObs['e'] = eccObs peccObs['p'] = pObs # Recovered", "= abs(data[key] - data['p'])/data['p'] halfP = abs(data[key] - 0.5*data['p'])/(0.5*data['p']) twiceP", "= np.zeros_like(rbins)[1:] #Recovered m1hRec = dict() qhRec = dict() ehRec", "at 1000 days # Dataframes to write to files later;", "= dict() for f in filters: m1hRec[f] = np.zeros_like(mbins)[1:] qhRec[f]", "= Nrec/Nall*Nmult raN = Nmult obN = Nobs/Nall*Nmult fiN =", "histAll/np.sum(histAll), color=c1) ax1.step(bin_edges, histObs/np.sum(histObs), color=c2) for f in filters: lw", "color=c2) for f in filters: lw = 1 if (f", "enumerate(f): pass return i + 1 def getPhs(sigma, m1=1*units.solMass, m2=1*units.solMass,", "boundary def fitRagfb(): x = [0.05, 0.1, 1, 8, 15]", "dhRec, db, 'd (kpc)', 'EBLSST_dhist') saveHist(maghAll, maghObs, maghRec, magb, 'mag',", "print(\"number of binaries in recovered with gatspy (raw, log):\",np.sum(fileRecN), np.log10(np.sum(fileRecN)))", "'\\n' fl.write(outline) for i in range(len(bin_edges)): outline = str(bin_edges[i])+','+str(histAll[i])+','+str(histObs[i]) for", "###################### #NEED TO ACCOUNT FOR THE BINARY FRACTION when combining", "ax1, ax2 histAll = np.insert(histAll,0,0) histObs = np.insert(histObs,0,0) for f", "cbar = f.colorbar(mlw, shrink=0.7) cbar.set_label(r'% recovered') f.savefig('./plots/' + 'mollweide_pct.pdf',format='pdf', bbox_inches", "corresponds to scenario (OC/GC, baseline/colossus, crowding/no crowding) peccAll.to_csv('./pecc/all-M67BN-ecc-p.csv', header =", "np.zeros_like(magbins)[1:] rhObs = np.zeros_like(rbins)[1:] #Recovered m1hRec = 
dict() qhRec =", "= 'LSM_PERIOD' fullP = abs(data[key] - data['p'])/data['p'] halfP = abs(data[key]", "(kpc)', 'EBLSST_dhist') saveHist(maghAll, maghObs, maghRec, magb, 'mag', 'EBLSST_maghist') saveHist(rhAll, rhObs,", "log):\",np.sum(fileObsN), np.log10(np.sum(fileObsN))) print(\"number of binaries in recovered with gatspy (raw,", "histRec[f] = np.insert(histRec[f],0,0) #PDF ax1.step(bin_edges, histAll/np.sum(histAll), color=c1) ax1.step(bin_edges, histObs/np.sum(histObs), color=c2)", "maghRec0/Nall*Nmult rhRec[filt] += rhRec0/Nall*Nmult #for the mollweide if (filt ==", "for f in filters: # for i in range(len(histRec[f])): #", "we don't have to run analyse each time # Can", "binary fraction, as a function of mass dm1 = np.diff(m1b)", "pAll, pObs, pRec) # Appending lists with all the p/ecc", "quest - want to take p and ecc from each", "= len(rec.index) #I'd like to account for all filters here", "bins=magbins) rhAll0, rb = np.histogram(data[\"r2\"]/data[\"r1\"], bins=rbins) if (doIndividualPlots): axmass.step(m1b[0:-1], m1hAll0/np.sum(m1hAll0),", "bins=lpbins) dhAll0, db = np.histogram(data[\"d\"], bins=dbins) maghAll0, magb = np.histogram(data[\"appMagMean_r\"],", "np.concatenate(eccAll) eccObs = np.concatenate(eccObs) eccRec = np.concatenate(eccRec) pAll = np.concatenate(pAll)", "print(\"###################\") print(\"number of binaries in input files (raw, log):\",np.sum(fileN), np.log10(np.sum(fileN)))", "ehAll0/Nall*Nmult lphAll += lphAll0/Nall*Nmult dhAll += dhAll0/Nall*Nmult maghAll += maghAll0/Nall*Nmult", "= len(obs.index) prsaObs = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] >", "0. Nall = len(data.index)/intNorm ###is this correct? (and the only", "lphAll0/np.sum(lphAll0), color='black', alpha=0.1) axdist.step(db[0:-1], dhAll0/np.sum(dhAll0), color='black', alpha=0.1) axmag.step(magb[0:-1], maghAll0/np.sum(maghAll0), color='black',", "= np.zeros_like(ebins)[1:] lphAll = np.zeros_like(lpbins)[1:] dhAll = np.zeros_like(dbins)[1:] maghAll =", "consider in file Nlim = 3 if (doIndividualPlots): fmass, axmass", "rhAll = np.zeros_like(rbins)[1:] #Observable m1hObs = np.zeros_like(mbins)[1:] qhObs = np.zeros_like(qbins)[1:]", "Dataframes to write to files later; 3 files for each", "eccRec peccRec['p'] = pRec # print('Final Dataframes:', peccAll, peccObs, peccRec)", "NallPrsa = NallPrsa/Nall*Nmult recFrac.append(rF) recN.append(rN) rawN.append(raN) obsN.append(obN) fileN.append(fiN) fileObsN.append(fioN) fileRecN.append(firN)", "# for i in range(len(histRec[f])): # cdfRec[f].append(np.sum(histRec[f][:i])/np.sum(histRec[f])) #ax2.step(bin_edges, cdfAll, color=c1)", "['e', 'p']) peccRec = pd.DataFrame(columns = ['e', 'p']) #Read in", "'LSM_PERIOD' fullP = abs(data[key] - data['p'])/data['p'] halfP = abs(data[key] -", "= m1b[:-1] + dm1/2. fb = np.sum(m1hAll0/len(data.index)*fbFit(m1val)) #account for the", "np.log10(np.sum(obsNPrsa))) print(\"total recovered in Prsa 15.8<r<19.5 P<1000d sample (raw, log):\",np.sum(recNPrsa),", "dataframe peccAll['e'] = eccAll peccAll['p'] = pAll # Observable dataframe", "(Nall >= Nlim): #create histograms #All m1hAll0, m1b = np.histogram(data[\"m1\"],", "print(\"###################\") print(\"total in sample (raw, log):\",np.sum(rawN), np.log10(np.sum(rawN))) print(\"total observable (raw,", "dtype='float') lpbins = np.arange(-2, 10, 0.5, dtype='float') dbins = np.arange(0,", "dtype='float') #blanks for the histograms #All m1hAll = np.zeros_like(mbins)[1:] qhAll", "= 0. rN = 0. Nrec = 0. 
Nobs =", "'tight') print(\"###################\") print(\"number of binaries in input files (raw, log):\",np.sum(fileN),", "if (Nrec >= Nlim): m1hRec0, m1b = np.histogram(rec[\"m1\"], bins=mbins) qhRec0,", "= [] allNPrsa = [] obsNPrsa = [] recNPrsa =", "key = 'LSM_PERIOD' fullP = abs(data[key] - data['p'])/data['p'] halfP =", "as np import os from astropy.coordinates import SkyCoord from astropy", "#CDF #cdfAll = [] #cdfObs = [] #cdfRec = dict()", "[0.05, 0.1, 1, 8, 15] #estimates of midpoints in bins,", "file_len(fname): i = 0 with open(fname) as f: for i,", "= '#A62B1F' #Dai Red c3 = '#BF8A26' #Dali Beige fig,ax1", "ax.grid(True) #ax.set_xlabel(r\"$l$\",fontsize=16) #ax.set_ylabel(r\"$b$\",fontsize=16) #mlw = ax.scatter(lGal.ravel()*np.pi/180., bGal.ravel()*np.pi/180., c=np.log10(np.array(recN)), cmap='viridis_r', s", "= np.histogram(data[\"m2\"]/data[\"m1\"], bins=qbins) ehAll0, eb = np.histogram(data[\"e\"], bins=ebins) lphAll0, lpb", "as f: for i, l in enumerate(f): pass return i", "to our dataframes # All dataframe peccAll['e'] = eccAll peccAll['p']", "fit = fitter(init, x, y) return fit def RagNormal(x, cdf", "make the histograms d = \"./input_files/\" files = os.listdir(d) IDs", "'EBLSST_dhist') saveHist(maghAll, maghObs, maghRec, magb, 'mag', 'EBLSST_maghist') saveHist(rhAll, rhObs, rhRec,", "pAll = np.concatenate(pAll) pObs = np.concatenate(pObs) pRec = np.concatenate(pRec) #", "ehRec = dict() lphRec = dict() dhRec = dict() maghRec", "crowding # New script copied from quest - want to", "<= 19.5) & (data['appMagMean_r'] >15.8) & (data['p'] < 1000) &", "['u_', 'g_', 'r_', 'i_', 'z_', 'y_','all']): c1 = '#5687A6' #Dali", "from each population (all, obs, rec) and put them into", "obsN = [] fileN = [] fileObsN = [] fileRecN", "Nlim): m1hRec0, m1b = np.histogram(rec[\"m1\"], bins=mbins) qhRec0, qb = np.histogram(rec[\"m2\"]/rec[\"m1\"],", "these lists because of period cutoff at 1000 days #", "0. fioN = 0. firN = 0. NallPrsa = 0.", "fileRecN = [] allNPrsa = [] obsNPrsa = [] recNPrsa", "< Pcut) | (halfP < Pcut) | (twiceP < Pcut))]", "peccAll, peccObs, peccRec) # print(peccRec.columns) # 3 letter code corresponds", "fmass, axmass = plt.subplots() fqrat, axqrat = plt.subplots() fecc, axecc", "= 0.5 #minimum number of lines to consider in file", "qhAll0/Nall*Nmult ehAll += ehAll0/Nall*Nmult lphAll += lphAll0/Nall*Nmult dhAll += dhAll0/Nall*Nmult", "ehObs += ehObs0/Nall*Nmult lphObs += lphObs0/Nall*Nmult dhObs += dhObs0/Nall*Nmult maghObs", "the mollweide if (filt == 'all'): Nrec = len(recCombined.index) rF", "# Appending for Andrew's files eccObs.append(prsaObs['e'].values) pObs.append(prsaObs['p'].values) if (Nobs >=", "+ 'massPDFall.pdf',format='pdf', bbox_inches = 'tight') fqrat.savefig('./plots/' + 'qPDFall.pdf',format='pdf', bbox_inches =", "color=c2) #for f in filters: # lw = 1 #", "= prsaRecCombined.append(prsaRec) # Going to use prsaRecCombined for ecc-p plots", "-20, 20) intCut, err = quad(RagNormal, -20, np.log10(365*10.)) intNorm =", "= models.PowerLaw1D(amplitude=0.5, x_0=1, alpha=-1.) 
fitter = fitting.LevMarLSQFitter() fit = fitter(init,", "script import pandas as pd import numpy as np import", "& (data['p'] < 1000) & (data['p'] >0.5) & (data['LSM_PERIOD'] !=", "'\\n' fl.write(outline) if __name__ == \"__main__\": filters = ['u_', 'g_',", "np.histogram(data[\"r2\"]/data[\"r1\"], bins=rbins) if (doIndividualPlots): axmass.step(m1b[0:-1], m1hAll0/np.sum(m1hAll0), color='black', alpha=0.1) axqrat.step(qb[0:-1], qhAll0/np.sum(qhAll0),", "rhObs0/Nall*Nmult #Rec recCombined = pd.DataFrame() prsaRecCombined = pd.DataFrame() for filt", "#write to a text file with open('./eblsst_files/' + fname+'.csv','w') as", "(data['LSM_PERIOD'] != -999) & ( (fullP < Pcut) | (halfP", "into separate file # Doing this so we don't have", "ehRec[filt] += ehRec0/Nall*Nmult lphRec[filt] += lphRec0/Nall*Nmult dhRec[filt] += dhRec0/Nall*Nmult maghRec[filt]", "ax1.set_xlabel(xtitle) #CDF #cdfAll = [] #cdfObs = [] #cdfRec =", "if (Nall >= Nlim): #create histograms #All m1hAll0, m1b =", "like to account for all filters here to have more", "NrecPrsa = 0. Nall = len(data.index)/intNorm ###is this correct? (and", "== 'all'): recCombined.drop_duplicates(inplace=True) prsaRecCombined.drop_duplicates(inplace=True) if (Nrec >= Nlim): m1hRec0, m1b", "f: for i, l in enumerate(f): pass return i +", "Nobs firN = Nrec NrecPrsa = len(prsaRecCombined.index) NrecPrsa = NrecPrsa/Nall*Nmult", "period ######################### ######################### # Baseline M67 long script -- NO", "import numpy as np import os from astropy.coordinates import SkyCoord", "'radPDFall.pdf',format='pdf', bbox_inches = 'tight') print(\"###################\") print(\"number of binaries in input", "(twiceP < Pcut))] Nrec = len(rec.index) #I'd like to account", "[] allNPrsa = [] obsNPrsa = [] recNPrsa = []", "each population (all, obs, rec) and put them into separate", "Blue (Andrew's AAS Poster) c2 = '#A62B1F' #Dai Red c3", "plt.subplots(figsize=(8,6), sharex=True)#can change to include cdf with ax1, ax2 histAll", "np.histogram(rec[\"appMagMean_r\"], bins=magbins) rhRec0, rb = np.histogram(rec[\"r2\"]/rec[\"r1\"], bins=rbins) m1hRec[filt] += m1hRec0/Nall*Nmult", "P<1000d sample (raw, log):\",np.sum(allNPrsa), np.log10(np.sum(allNPrsa))) print(\"total observable in Prsa 15.8<r<19.5", "0.2, dtype='float') #blanks for the histograms #All m1hAll = np.zeros_like(mbins)[1:]", "scipy.stats.norm.cdf(x,mean,std) return scipy.stats.norm.pdf(x,mean,std) def saveHist(histAll, histObs, histRec, bin_edges, xtitle, fname,", "np.log10(np.sum(fileObsN))) print(\"number of binaries in recovered with gatspy (raw, log):\",np.sum(fileRecN),", "#Obs obs = data.loc[data['LSM_PERIOD'] != -999] Nobs = len(obs.index) prsaObs", "lphAll += lphAll0/Nall*Nmult dhAll += dhAll0/Nall*Nmult maghAll += maghAll0/Nall*Nmult rhAll", "cmap='viridis_r', s = 4) cbar = f.colorbar(mlw, shrink=0.7) cbar.set_label(r'log10(N) recovered')", "= np.histogram(data[\"d\"], bins=dbins) maghAll0, magb = np.histogram(data[\"appMagMean_r\"], bins=magbins) rhAll0, rb", "rhRec[filt] += rhRec0/Nall*Nmult #for the mollweide if (filt == 'all'):", "Phs.decompose().to(units.day) #similar to field, but limiting by the hard-soft boundary", "np.histogram(obs[\"m1\"], bins=mbins) qhObs0, qb = np.histogram(obs[\"m2\"]/obs[\"m1\"], bins=qbins) ehObs0, eb =", "in bins, and using this: https://sites.uni.edu/morgans/astro/course/Notes/section2/spectralmasses.html y = [0.20, 0.35,", "= np.pi*constants.G/np.sqrt(2.)*(m1*m2/m3)**(3./2.)*(m1 + m2)**(-0.5)*sigma**(-3.) 
return Phs.decompose().to(units.day) #similar to field, but", "= [] for i, f in enumerate(files): print(round(i/len(files),4), f) fl", "= [] Dec = [] recFrac = [] recN =", "rhRec0, rb = np.histogram(rec[\"r2\"]/rec[\"r1\"], bins=rbins) m1hRec[filt] += m1hRec0/Nall*Nmult qhRec[filt] +=", "= np.concatenate(pObs) pRec = np.concatenate(pRec) # print('Ecc lists:', eccAll, eccObs,", "= [] # Using prsa dataframes for these lists because", "histograms saveHist(m1hAll, m1hObs, m1hRec, m1b, 'm1 (Msolar)', 'EBLSST_m1hist') saveHist(qhAll, qhObs,", "'eccPDFall.pdf',format='pdf', bbox_inches = 'tight') flper.savefig('./plots/' + 'lperPDFall.pdf',format='pdf', bbox_inches = 'tight')", "< Pcut) | (twiceP < Pcut))] prsaRec = data.loc[(data['appMagMean_r'] <=", "twiceP = abs(data[key] - 2.*data['p'])/(2.*data['p']) rec = data.loc[(data[key] != -999)", "= [] recNPrsa = [] # Lists for period and", "= plt.subplots() fmag, axmag = plt.subplots() frad, axrad = plt.subplots()", "to use prsaRecCombined for ecc-p plots to account for all", "dhObs = np.zeros_like(dbins)[1:] maghObs = np.zeros_like(magbins)[1:] rhObs = np.zeros_like(rbins)[1:] #Recovered", "np.zeros_like(dbins)[1:] maghObs = np.zeros_like(magbins)[1:] rhObs = np.zeros_like(rbins)[1:] #Recovered m1hRec =", "lists with all the p/ecc values to our dataframes #", "if (f == 'all'): # lw = 0.5 # ax2.step(bin_edges,", "qb = np.histogram(rec[\"m2\"]/rec[\"m1\"], bins=qbins) ehRec0, eb = np.histogram(rec[\"e\"], bins=ebins) lphRec0,", "Nmult obN = Nobs/Nall*Nmult fiN = Nall fioN = Nobs", "ax.set_ylabel(\"Dec\",fontsize=16) mlw = ax.scatter(np.array(RAwrap).ravel()*np.pi/180., np.array(Decwrap).ravel()*np.pi/180., c=np.array(recFrac)*100., cmap='viridis_r', s = 4)", "if (doIndividualPlots): fmass, axmass = plt.subplots() fqrat, axqrat = plt.subplots()", "obs = data.loc[data['LSM_PERIOD'] != -999] Nobs = len(obs.index) prsaObs =", "cdfRec[f], color=c3, linewidth=lw) #ax2.set_ylabel('CDF') #ax2.set_xlabel(xtitle) fig.subplots_adjust(hspace=0) fig.savefig('./plots/' + fname+'.pdf',format='pdf', bbox_inches", "m1hRec = dict() qhRec = dict() ehRec = dict() lphRec", "file # Doing this so we don't have to run", "i, f in enumerate(files): print(round(i/len(files),4), f) fl = file_len(d+f) if", "binaries in recovered with gatspy (raw, log):\",np.sum(fileRecN), np.log10(np.sum(fileRecN))) print(\"recovered/observable*100 with", "0.75] init = models.PowerLaw1D(amplitude=0.5, x_0=1, alpha=-1.) fitter = fitting.LevMarLSQFitter() fit", "+= maghAll0/Nall*Nmult rhAll += rhAll0/Nall*Nmult #Obs obs = data.loc[data['LSM_PERIOD'] !=", "Poster) c2 = '#A62B1F' #Dai Red c3 = '#BF8A26' #Dali", "# cdfObs.append(np.sum(histObs[:i])/np.sum(histObs)) # for f in filters: # for i", "= intCut/intAll #cutoff in percent error for \"recovered\" Pcut =", "ebins = np.arange(0, 1.05, 0.05, dtype='float') lpbins = np.arange(-2, 10,", "#ax.set_xlabel(r\"$l$\",fontsize=16) #ax.set_ylabel(r\"$b$\",fontsize=16) #mlw = ax.scatter(lGal.ravel()*np.pi/180., bGal.ravel()*np.pi/180., c=np.log10(np.array(recN)), cmap='viridis_r', s =", "#for Quest import matplotlib matplotlib.use('Agg') doIndividualPlots = True from matplotlib", "< Pcut))] prsaRec = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] >15.8)", "np.log10(np.sum(obsN))) print(\"total recovered (raw, log):\",np.sum(recN), np.log10(np.sum(recN))) print(\"recovered/observable*100:\",np.sum(recN)/np.sum(obsN)*100.) 
print(\"###################\") print(\"total in", "= np.histogram(rec[\"r2\"]/rec[\"r1\"], bins=rbins) m1hRec[filt] += m1hRec0/Nall*Nmult qhRec[filt] += qhRec0/Nall*Nmult ehRec[filt]", "np.concatenate(pRec) # print('Ecc lists:', eccAll, eccObs, eccRec) # print('P lists:',", "m1hAll += m1hAll0/Nall*Nmult qhAll += qhAll0/Nall*Nmult ehAll += ehAll0/Nall*Nmult lphAll", "plt def file_len(fname): i = 0 with open(fname) as f:", "recCombined.drop_duplicates(inplace=True) prsaRecCombined.drop_duplicates(inplace=True) if (Nrec >= Nlim): m1hRec0, m1b = np.histogram(rec[\"m1\"],", "np.sum(recN), np.sum(lphRec)/np.sum(recN), np.sum(lphRec0), Nrec, np.sum(lphRec0)/Nrec, np.sum(lphObs), np.sum(obsN), np.sum(lphObs)/np.sum(obsN)) # Concatenating", "from quest - want to take p and ecc from", "in percent error for \"recovered\" Pcut = 0.1 #assumed mean", "files for each sub-population - append everything to these peccAll", "Nobs/Nall*Nmult fiN = Nall fioN = Nobs firN = Nrec", "print('P lists:', pAll, pObs, pRec) # Appending lists with all", "append everything to these peccAll = pd.DataFrame(columns = ['e', 'p'])", "fit def RagNormal(x, cdf = False): mean = 5.03 std", "fioN = 0. firN = 0. NallPrsa = 0. NobsPrsa", "qb = np.histogram(obs[\"m2\"]/obs[\"m1\"], bins=qbins) ehObs0, eb = np.histogram(obs[\"e\"], bins=ebins) lphObs0,", "the mollweide coords = SkyCoord(RA, Dec, unit=(units.degree, units.degree),frame='icrs') lGal =", "= f.colorbar(mlw, shrink=0.7) cbar.set_label(r'% recovered') f.savefig('./plots/' + 'mollweide_pct.pdf',format='pdf', bbox_inches =", "4) cbar = f.colorbar(mlw, shrink=0.7) cbar.set_label(r'log10(N) recovered') f.savefig('./plots/' + 'mollweide_N.pdf',format='pdf',", "bins=rbins) m1hRec[filt] += m1hRec0/Nall*Nmult qhRec[filt] += qhRec0/Nall*Nmult ehRec[filt] += ehRec0/Nall*Nmult", "0. NallPrsa = 0. NobsPrsa = 0. NrecPrsa = 0.", "0. raN = 0. obN = 0. fiN = 0.", "- append everything to these peccAll = pd.DataFrame(columns = ['e',", "| (halfP < Pcut) | (twiceP < Pcut))] prsaRec =", "+ dm1/2. 
fb = np.sum(m1hAll0/len(data.index)*fbFit(m1val)) #account for the hard-soft boundary", "m1b = np.histogram(rec[\"m1\"], bins=mbins) qhRec0, qb = np.histogram(rec[\"m2\"]/rec[\"m1\"], bins=qbins) ehRec0,", "data and make the histograms d = \"./input_files/\" files =", "rF = Nrec/Nall rN = Nrec/Nall*Nmult raN = Nmult obN", "rb = np.histogram(data[\"r2\"]/data[\"r1\"], bins=rbins) if (doIndividualPlots): axmass.step(m1b[0:-1], m1hAll0/np.sum(m1hAll0), color='black', alpha=0.1)", "= np.zeros_like(lpbins)[1:] dhAll = np.zeros_like(dbins)[1:] maghAll = np.zeros_like(magbins)[1:] rhAll =", "= '#5687A6' #Dali Blue (Andrew's AAS Poster) c2 = '#A62B1F'", "= np.histogram(obs[\"r2\"]/obs[\"r1\"], bins=rbins) m1hObs += m1hObs0/Nall*Nmult qhObs += qhObs0/Nall*Nmult ehObs", "in the header header = pd.read_csv(d+f, nrows=1) ###################### #NEED TO", "= ax.scatter(lGal.ravel()*np.pi/180., bGal.ravel()*np.pi/180., c=np.log10(np.array(recN)), cmap='viridis_r', s = 4) ax.set_xlabel(\"RA\",fontsize=16) ax.set_ylabel(\"Dec\",fontsize=16)", "len(obs.index) prsaObs = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] > 15.8)", "fontsize = 16) ax1.set_xlabel(xtitle) #CDF #cdfAll = [] #cdfObs =", "bbox_inches = 'tight') f, ax = plt.subplots(subplot_kw={'projection': \"mollweide\"}, figsize=(8,5)) ax.grid(True)", "= dict() dhRec = dict() maghRec = dict() rhRec =", "= abs(data[key] - 0.5*data['p'])/(0.5*data['p']) twiceP = abs(data[key] - 2.*data['p'])/(2.*data['p']) rec", "the Raghavan binary fraction fit fbFit= fitRagfb() print(fbFit) #to normalize", "eccAll = np.concatenate(eccAll) eccObs = np.concatenate(eccObs) eccRec = np.concatenate(eccRec) pAll", "+= maghObs0/Nall*Nmult rhObs += rhObs0/Nall*Nmult #Rec recCombined = pd.DataFrame() prsaRecCombined", "1000) & (data['p'] > 0.5)] # Appending for Andrew eccAll.append(prsa['e'].values)", "script for p-ecc plots # Quest paths in this version", "histograms #All m1hAll0, m1b = np.histogram(data[\"m1\"], bins=mbins) qhAll0, qb =", "in this version of script import pandas as pd import", "(OC/GC, baseline/colossus, crowding/no crowding) peccAll.to_csv('./pecc/all-M67BN-ecc-p.csv', header = ['e', 'p']) peccObs.to_csv('./pecc/obs-M67BN-ecc-p.csv',", "in input files (raw, log):\",np.sum(fileN), np.log10(np.sum(fileN))) print(\"number of binaries in", "Baseline', fontsize = 16) ax1.set_xlabel(xtitle) #CDF #cdfAll = [] #cdfObs", "dtype='float') dbins = np.arange(0, 40, 1, dtype='float') magbins = np.arange(11,", "m1hAll0/np.sum(m1hAll0), color='black', alpha=0.1) axqrat.step(qb[0:-1], qhAll0/np.sum(qhAll0), color='black', alpha=0.1) axecc.step(eb[0:-1], ehAll0/np.sum(ehAll0), color='black',", "bins=mbins) qhAll0, qb = np.histogram(data[\"m2\"]/data[\"m1\"], bins=qbins) ehAll0, eb = np.histogram(data[\"e\"],", "& ( (fullP < Pcut) | (halfP < Pcut) |", "= [] #cdfObs = [] #cdfRec = dict() #for f", "cbar.set_label(r'% recovered') f.savefig('./plots/' + 'mollweide_pct.pdf',format='pdf', bbox_inches = 'tight') f, ax", "analyse each time # Can write separate script for p-ecc", "filters: # for i in range(len(histRec[f])): # cdfRec[f].append(np.sum(histRec[f][:i])/np.sum(histRec[f])) #ax2.step(bin_edges, cdfAll,", "everything to these peccAll = pd.DataFrame(columns = ['e', 'p']) peccObs", "ecc from each population (all, obs, rec) and put them", "& (data['p'] >0.5) & (data['LSM_PERIOD'] != -999) & ( (fullP", "circularization plots eccAll = [] eccObs = [] eccRec =", "color=c1) ax1.step(bin_edges, histObs/np.sum(histObs), color=c2) for f in filters: lw =", "if 
(doIndividualPlots): axmass.step(m1b[0:-1], m1hAll0/np.sum(m1hAll0), color='black', alpha=0.1) axqrat.step(qb[0:-1], qhAll0/np.sum(qhAll0), color='black', alpha=0.1)", "cmap='viridis_r', s = 4) cbar = f.colorbar(mlw, shrink=0.7) cbar.set_label(r'% recovered')", "ax = plt.subplots(subplot_kw={'projection': \"mollweide\"}, figsize=(8,5)) ax.grid(True) #ax.set_xlabel(r\"$l$\",fontsize=16) #ax.set_ylabel(r\"$b$\",fontsize=16) #mlw =", "= 1 # if (f == 'all'): # lw =", "f in filters: m1hRec[f] = np.zeros_like(mbins)[1:] qhRec[f] = np.zeros_like(qbins)[1:] ehRec[f]", "pAll.append(prsa['p'].values) NallPrsa = len(prsa.index) if (Nall >= Nlim): #create histograms", "and eccentricity for Andrew's circularization plots eccAll = [] eccObs", "np.concatenate(eccObs) eccRec = np.concatenate(eccRec) pAll = np.concatenate(pAll) pObs = np.concatenate(pObs)", "def getPhs(sigma, m1=1*units.solMass, m2=1*units.solMass, m3=0.5*units.solMass): Phs = np.pi*constants.G/np.sqrt(2.)*(m1*m2/m3)**(3./2.)*(m1 + m2)**(-0.5)*sigma**(-3.)", "fileObsN = [] fileRecN = [] allNPrsa = [] obsNPrsa", "saveHist(rhAll, rhObs, rhRec, rb, 'r2/r1', 'EBLSST_rhist') #make the mollweide coords", "'y_','all']): c1 = '#5687A6' #Dali Blue (Andrew's AAS Poster) c2", "= 1. RA.append(header['OpSimRA']) Dec.append(header['OpSimDec']) #read in rest of the file", "rawN = [] obsN = [] fileN = [] fileObsN", "-999] Nobs = len(obs.index) prsaObs = data.loc[(data['appMagMean_r'] <= 19.5) &", "= np.zeros_like(qbins)[1:] ehAll = np.zeros_like(ebins)[1:] lphAll = np.zeros_like(lpbins)[1:] dhAll =", "= np.histogram(np.ma.log10(obs[\"p\"].values).filled(-999), bins=lpbins) dhObs0, db = np.histogram(obs[\"d\"], bins=dbins) maghObs0, magb", "from matplotlib import pyplot as plt def file_len(fname): i =", "[] recNPrsa = [] # Lists for period and eccentricity", "ehRec0, eb = np.histogram(rec[\"e\"], bins=ebins) lphRec0, lpb = np.histogram(np.ma.log10(rec[\"p\"].values).filled(-999), bins=lpbins)", "bbox_inches = 'tight') if (doIndividualPlots): fmass.savefig('./plots/' + 'massPDFall.pdf',format='pdf', bbox_inches =", "rest of the file data = pd.read_csv(d+f, header = 2).fillna(-999)", "firN = Nrec NrecPrsa = len(prsaRecCombined.index) NrecPrsa = NrecPrsa/Nall*Nmult NobsPrsa", "+= lphRec0/Nall*Nmult dhRec[filt] += dhRec0/Nall*Nmult maghRec[filt] += maghRec0/Nall*Nmult rhRec[filt] +=", "#ax.set_xlabel(r\"$l$\",fontsize=16) #ax.set_ylabel(r\"$b$\",fontsize=16) #mlw = ax.scatter(lGal.ravel()*np.pi/180., bGal.ravel()*np.pi/180., c=np.log10(np.array(recFrac)*100.), cmap='viridis_r', s =", "dhAll += dhAll0/Nall*Nmult maghAll += maghAll0/Nall*Nmult rhAll += rhAll0/Nall*Nmult #Obs", "Nrec = len(recCombined.index) rF = Nrec/Nall rN = Nrec/Nall*Nmult raN", "in Prsa 15.8<r<19.5 P<1000d sample (raw, log):\",np.sum(allNPrsa), np.log10(np.sum(allNPrsa))) print(\"total observable", "and ecc lists eccAll = np.concatenate(eccAll) eccObs = np.concatenate(eccObs) eccRec", "& (data['LSM_PERIOD'] != -999)] NobsPrsa = len(prsaObs.index) # Appending for", "= 0.1 #assumed mean stellar mass mMean = 0.5 #minimum", "cdf = False): mean = 5.03 std = 2.28 if", "fig.subplots_adjust(hspace=0) fig.savefig('./plots/' + fname+'.pdf',format='pdf', bbox_inches = 'tight') #write to a", "['e', 'p']) peccRec.to_csv('./pecc/rec-M67BN-ecc-p.csv', header = ['e', 'p']) #plot and save", "15] #estimates of midpoints in bins, and using this: https://sites.uni.edu/morgans/astro/course/Notes/section2/spectralmasses.html" ]
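The script above classifies a binary as "recovered" when the measured period matches the true period, its half, or its double to within the fractional tolerance Pcut. A minimal, self-contained sketch of that criterion; the function name and test values are illustrative, not from the source:

def is_recovered(p_true, p_meas, Pcut=0.1):
    # -999 is the missing-value flag used throughout the script above
    if p_meas == -999:
        return False
    # accept the true period and its half/double aliases
    return any(abs(p_meas - p)/p < Pcut for p in (p_true, 0.5*p_true, 2.*p_true))

print(is_recovered(10.0, 5.02))  # True: half-period alias within 10%
print(is_recovered(10.0, 7.3))   # False: matches no alias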
[ "as cms import os process = cms.Process(\"summary\") process.MessageLogger = cms.Service(", "#process.BeamSpotRcdPrinter.startIOV = 1354018504835073 #process.BeamSpotRcdPrinter.endIOV = 1374668707594734 ### 2018D Prompt #process.BeamSpotRcdPrinter.tagName", "process.load(\"CondTools.BeamSpot.BeamSpotRcdPrinter_cfi\") ### 2018 Prompt process.BeamSpotRcdPrinter.tagName = \"BeamSpotObjects_PCL_byLumi_v0_prompt\" process.BeamSpotRcdPrinter.startIOV = 1350646955507767", "= 1316235677532161 ### 2018 ABC ReReco #process.BeamSpotRcdPrinter.tagName = \"BeamSpotObjects_LumiBased_v4_offline\" #process.BeamSpotRcdPrinter.startIOV", "### 2018 ABC ReReco #process.BeamSpotRcdPrinter.tagName = \"BeamSpotObjects_LumiBased_v4_offline\" #process.BeamSpotRcdPrinter.startIOV = 1354018504835073", "= \"BeamSpotObjects_PCL_byLumi_v0_prompt\" #process.BeamSpotRcdPrinter.startIOV = 1377280047710242 #process.BeamSpotRcdPrinter.endIOV = 1406876667347162 process.p =", "process = cms.Process(\"summary\") process.MessageLogger = cms.Service( \"MessageLogger\", debugModules = cms.untracked.vstring(", "\"BeamSpotObjects_PCL_byLumi_v0_prompt\" process.BeamSpotRcdPrinter.startIOV = 1350646955507767 process.BeamSpotRcdPrinter.endIOV = 1406876667347162 process.BeamSpotRcdPrinter.output = \"summary2018_Prompt.txt\"", "cms import os process = cms.Process(\"summary\") process.MessageLogger = cms.Service( \"MessageLogger\",", "cms.Service( \"MessageLogger\", debugModules = cms.untracked.vstring( \"*\" ), cout = cms.untracked.PSet(", "process.source = cms.Source(\"EmptySource\", numberEventsInRun = cms.untracked.uint32(1), firstRun = cms.untracked.uint32(1) )", "2018 Prompt process.BeamSpotRcdPrinter.tagName = \"BeamSpotObjects_PCL_byLumi_v0_prompt\" process.BeamSpotRcdPrinter.startIOV = 1350646955507767 process.BeamSpotRcdPrinter.endIOV =", "process.load(\"CondCore.CondDB.CondDB_cfi\") process.load(\"CondTools.BeamSpot.BeamSpotRcdPrinter_cfi\") ### 2018 Prompt process.BeamSpotRcdPrinter.tagName = \"BeamSpotObjects_PCL_byLumi_v0_prompt\" process.BeamSpotRcdPrinter.startIOV =", "process.MessageLogger = cms.Service( \"MessageLogger\", debugModules = cms.untracked.vstring( \"*\" ), cout", ") process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1) ) process.source =", "= 1354018504835073 #process.BeamSpotRcdPrinter.endIOV = 1374668707594734 ### 2018D Prompt #process.BeamSpotRcdPrinter.tagName =", "= cms.untracked.vstring( \"cout\" ) ) process.maxEvents = cms.untracked.PSet( input =", "Prompt process.BeamSpotRcdPrinter.tagName = \"BeamSpotObjects_PCL_byLumi_v0_prompt\" process.BeamSpotRcdPrinter.startIOV = 1350646955507767 process.BeamSpotRcdPrinter.endIOV = 1406876667347162", ") process.load(\"CondCore.CondDB.CondDB_cfi\") process.load(\"CondTools.BeamSpot.BeamSpotRcdPrinter_cfi\") ### 2018 Prompt process.BeamSpotRcdPrinter.tagName = \"BeamSpotObjects_PCL_byLumi_v0_prompt\" process.BeamSpotRcdPrinter.startIOV", "ABC ReReco #process.BeamSpotRcdPrinter.tagName = \"BeamSpotObjects_LumiBased_v4_offline\" #process.BeamSpotRcdPrinter.startIOV = 1354018504835073 #process.BeamSpotRcdPrinter.endIOV =", "cms.untracked.string( \"DEBUG\" ) ), destinations = cms.untracked.vstring( \"cout\" ) )", "= cms.Process(\"summary\") process.MessageLogger = cms.Service( \"MessageLogger\", debugModules = cms.untracked.vstring( \"*\"", "\"*\" ), cout = cms.untracked.PSet( threshold = cms.untracked.string( \"DEBUG\" )", "import os process = cms.Process(\"summary\") process.MessageLogger = cms.Service( \"MessageLogger\", debugModules", "firstRun 
= cms.untracked.uint32(1) ) process.load(\"CondCore.CondDB.CondDB_cfi\") process.load(\"CondTools.BeamSpot.BeamSpotRcdPrinter_cfi\") ### 2018 Prompt process.BeamSpotRcdPrinter.tagName", "2017 ReReco #process.BeamSpotRcdPrinter.tagName = \"BeamSpotObjects_LumiBased_v4_offline\" #process.BeamSpotRcdPrinter.startIOV = 1275820035276801 #process.BeamSpotRcdPrinter.endIOV =", "= cms.untracked.vstring( \"*\" ), cout = cms.untracked.PSet( threshold = cms.untracked.string(", ") ) process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1) ) process.source", "cms.Process(\"summary\") process.MessageLogger = cms.Service( \"MessageLogger\", debugModules = cms.untracked.vstring( \"*\" ),", "= \"BeamSpotObjects_PCL_byLumi_v0_prompt\" process.BeamSpotRcdPrinter.startIOV = 1350646955507767 process.BeamSpotRcdPrinter.endIOV = 1406876667347162 process.BeamSpotRcdPrinter.output =", "numberEventsInRun = cms.untracked.uint32(1), firstRun = cms.untracked.uint32(1) ) process.load(\"CondCore.CondDB.CondDB_cfi\") process.load(\"CondTools.BeamSpot.BeamSpotRcdPrinter_cfi\") ###", "cms.untracked.int32(1) ) process.source = cms.Source(\"EmptySource\", numberEventsInRun = cms.untracked.uint32(1), firstRun =", "1275820035276801 #process.BeamSpotRcdPrinter.endIOV = 1316235677532161 ### 2018 ABC ReReco #process.BeamSpotRcdPrinter.tagName =", "### 2018 Prompt process.BeamSpotRcdPrinter.tagName = \"BeamSpotObjects_PCL_byLumi_v0_prompt\" process.BeamSpotRcdPrinter.startIOV = 1350646955507767 process.BeamSpotRcdPrinter.endIOV", "#process.BeamSpotRcdPrinter.tagName = \"BeamSpotObjects_PCL_byLumi_v0_prompt\" #process.BeamSpotRcdPrinter.startIOV = 1377280047710242 #process.BeamSpotRcdPrinter.endIOV = 1406876667347162 process.p", "\"BeamSpotObjects_LumiBased_v4_offline\" #process.BeamSpotRcdPrinter.startIOV = 1275820035276801 #process.BeamSpotRcdPrinter.endIOV = 1316235677532161 ### 2018 ABC", ") process.source = cms.Source(\"EmptySource\", numberEventsInRun = cms.untracked.uint32(1), firstRun = cms.untracked.uint32(1)", "ReReco #process.BeamSpotRcdPrinter.tagName = \"BeamSpotObjects_LumiBased_v4_offline\" #process.BeamSpotRcdPrinter.startIOV = 1354018504835073 #process.BeamSpotRcdPrinter.endIOV = 1374668707594734", "cms.untracked.PSet( threshold = cms.untracked.string( \"DEBUG\" ) ), destinations = cms.untracked.vstring(", "= cms.untracked.uint32(1), firstRun = cms.untracked.uint32(1) ) process.load(\"CondCore.CondDB.CondDB_cfi\") process.load(\"CondTools.BeamSpot.BeamSpotRcdPrinter_cfi\") ### 2018", "cms.untracked.vstring( \"*\" ), cout = cms.untracked.PSet( threshold = cms.untracked.string( \"DEBUG\"", "\"cout\" ) ) process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1) )", "FWCore.ParameterSet.Config as cms import os process = cms.Process(\"summary\") process.MessageLogger =", "= cms.Source(\"EmptySource\", numberEventsInRun = cms.untracked.uint32(1), firstRun = cms.untracked.uint32(1) ) process.load(\"CondCore.CondDB.CondDB_cfi\")", "2018D Prompt #process.BeamSpotRcdPrinter.tagName = \"BeamSpotObjects_PCL_byLumi_v0_prompt\" #process.BeamSpotRcdPrinter.startIOV = 1377280047710242 #process.BeamSpotRcdPrinter.endIOV =", "\"summary2018_Prompt.txt\" ### 2017 ReReco #process.BeamSpotRcdPrinter.tagName = \"BeamSpotObjects_LumiBased_v4_offline\" #process.BeamSpotRcdPrinter.startIOV = 1275820035276801", "process.BeamSpotRcdPrinter.output = \"summary2018_Prompt.txt\" ### 2017 ReReco #process.BeamSpotRcdPrinter.tagName = \"BeamSpotObjects_LumiBased_v4_offline\" #process.BeamSpotRcdPrinter.startIOV", "### 
2018D Prompt #process.BeamSpotRcdPrinter.tagName = \"BeamSpotObjects_PCL_byLumi_v0_prompt\" #process.BeamSpotRcdPrinter.startIOV = 1377280047710242 #process.BeamSpotRcdPrinter.endIOV", "cms.untracked.PSet( input = cms.untracked.int32(1) ) process.source = cms.Source(\"EmptySource\", numberEventsInRun =", "### 2017 ReReco #process.BeamSpotRcdPrinter.tagName = \"BeamSpotObjects_LumiBased_v4_offline\" #process.BeamSpotRcdPrinter.startIOV = 1275820035276801 #process.BeamSpotRcdPrinter.endIOV", "#process.BeamSpotRcdPrinter.endIOV = 1374668707594734 ### 2018D Prompt #process.BeamSpotRcdPrinter.tagName = \"BeamSpotObjects_PCL_byLumi_v0_prompt\" #process.BeamSpotRcdPrinter.startIOV", "ReReco #process.BeamSpotRcdPrinter.tagName = \"BeamSpotObjects_LumiBased_v4_offline\" #process.BeamSpotRcdPrinter.startIOV = 1275820035276801 #process.BeamSpotRcdPrinter.endIOV = 1316235677532161", "\"BeamSpotObjects_LumiBased_v4_offline\" #process.BeamSpotRcdPrinter.startIOV = 1354018504835073 #process.BeamSpotRcdPrinter.endIOV = 1374668707594734 ### 2018D Prompt", "cms.untracked.uint32(1), firstRun = cms.untracked.uint32(1) ) process.load(\"CondCore.CondDB.CondDB_cfi\") process.load(\"CondTools.BeamSpot.BeamSpotRcdPrinter_cfi\") ### 2018 Prompt", "process.BeamSpotRcdPrinter.startIOV = 1350646955507767 process.BeamSpotRcdPrinter.endIOV = 1406876667347162 process.BeamSpotRcdPrinter.output = \"summary2018_Prompt.txt\" ###", "process.BeamSpotRcdPrinter.endIOV = 1406876667347162 process.BeamSpotRcdPrinter.output = \"summary2018_Prompt.txt\" ### 2017 ReReco #process.BeamSpotRcdPrinter.tagName", "= cms.untracked.PSet( threshold = cms.untracked.string( \"DEBUG\" ) ), destinations =", "destinations = cms.untracked.vstring( \"cout\" ) ) process.maxEvents = cms.untracked.PSet( input", "= \"BeamSpotObjects_LumiBased_v4_offline\" #process.BeamSpotRcdPrinter.startIOV = 1354018504835073 #process.BeamSpotRcdPrinter.endIOV = 1374668707594734 ### 2018D", "= cms.untracked.string( \"DEBUG\" ) ), destinations = cms.untracked.vstring( \"cout\" )", "= 1406876667347162 process.BeamSpotRcdPrinter.output = \"summary2018_Prompt.txt\" ### 2017 ReReco #process.BeamSpotRcdPrinter.tagName =", "\"DEBUG\" ) ), destinations = cms.untracked.vstring( \"cout\" ) ) process.maxEvents", "= 1374668707594734 ### 2018D Prompt #process.BeamSpotRcdPrinter.tagName = \"BeamSpotObjects_PCL_byLumi_v0_prompt\" #process.BeamSpotRcdPrinter.startIOV =", "= 1350646955507767 process.BeamSpotRcdPrinter.endIOV = 1406876667347162 process.BeamSpotRcdPrinter.output = \"summary2018_Prompt.txt\" ### 2017", "= \"summary2018_Prompt.txt\" ### 2017 ReReco #process.BeamSpotRcdPrinter.tagName = \"BeamSpotObjects_LumiBased_v4_offline\" #process.BeamSpotRcdPrinter.startIOV =", "#process.BeamSpotRcdPrinter.tagName = \"BeamSpotObjects_LumiBased_v4_offline\" #process.BeamSpotRcdPrinter.startIOV = 1354018504835073 #process.BeamSpotRcdPrinter.endIOV = 1374668707594734 ###", "#process.BeamSpotRcdPrinter.endIOV = 1316235677532161 ### 2018 ABC ReReco #process.BeamSpotRcdPrinter.tagName = \"BeamSpotObjects_LumiBased_v4_offline\"", "), cout = cms.untracked.PSet( threshold = cms.untracked.string( \"DEBUG\" ) ),", "cout = cms.untracked.PSet( threshold = cms.untracked.string( \"DEBUG\" ) ), destinations", "), destinations = cms.untracked.vstring( \"cout\" ) ) process.maxEvents = cms.untracked.PSet(", "1354018504835073 #process.BeamSpotRcdPrinter.endIOV = 1374668707594734 ### 2018D Prompt #process.BeamSpotRcdPrinter.tagName = \"BeamSpotObjects_PCL_byLumi_v0_prompt\"", "threshold = 
cms.untracked.string( \"DEBUG\" ) ), destinations = cms.untracked.vstring( \"cout\"", "process.BeamSpotRcdPrinter.tagName = \"BeamSpotObjects_PCL_byLumi_v0_prompt\" process.BeamSpotRcdPrinter.startIOV = 1350646955507767 process.BeamSpotRcdPrinter.endIOV = 1406876667347162 process.BeamSpotRcdPrinter.output", ") ), destinations = cms.untracked.vstring( \"cout\" ) ) process.maxEvents =", "input = cms.untracked.int32(1) ) process.source = cms.Source(\"EmptySource\", numberEventsInRun = cms.untracked.uint32(1),", "\"BeamSpotObjects_PCL_byLumi_v0_prompt\" #process.BeamSpotRcdPrinter.startIOV = 1377280047710242 #process.BeamSpotRcdPrinter.endIOV = 1406876667347162 process.p = cms.Path(process.BeamSpotRcdPrinter)", "os process = cms.Process(\"summary\") process.MessageLogger = cms.Service( \"MessageLogger\", debugModules =", "1350646955507767 process.BeamSpotRcdPrinter.endIOV = 1406876667347162 process.BeamSpotRcdPrinter.output = \"summary2018_Prompt.txt\" ### 2017 ReReco", "= \"BeamSpotObjects_LumiBased_v4_offline\" #process.BeamSpotRcdPrinter.startIOV = 1275820035276801 #process.BeamSpotRcdPrinter.endIOV = 1316235677532161 ### 2018", "<filename>CondTools/BeamSpot/test/BeamSpotRcdPrinter_cfg.py import FWCore.ParameterSet.Config as cms import os process = cms.Process(\"summary\")", "= cms.untracked.int32(1) ) process.source = cms.Source(\"EmptySource\", numberEventsInRun = cms.untracked.uint32(1), firstRun", "= cms.untracked.uint32(1) ) process.load(\"CondCore.CondDB.CondDB_cfi\") process.load(\"CondTools.BeamSpot.BeamSpotRcdPrinter_cfi\") ### 2018 Prompt process.BeamSpotRcdPrinter.tagName =", "import FWCore.ParameterSet.Config as cms import os process = cms.Process(\"summary\") process.MessageLogger", "1374668707594734 ### 2018D Prompt #process.BeamSpotRcdPrinter.tagName = \"BeamSpotObjects_PCL_byLumi_v0_prompt\" #process.BeamSpotRcdPrinter.startIOV = 1377280047710242", "cms.untracked.uint32(1) ) process.load(\"CondCore.CondDB.CondDB_cfi\") process.load(\"CondTools.BeamSpot.BeamSpotRcdPrinter_cfi\") ### 2018 Prompt process.BeamSpotRcdPrinter.tagName = \"BeamSpotObjects_PCL_byLumi_v0_prompt\"", "2018 ABC ReReco #process.BeamSpotRcdPrinter.tagName = \"BeamSpotObjects_LumiBased_v4_offline\" #process.BeamSpotRcdPrinter.startIOV = 1354018504835073 #process.BeamSpotRcdPrinter.endIOV", "= 1275820035276801 #process.BeamSpotRcdPrinter.endIOV = 1316235677532161 ### 2018 ABC ReReco #process.BeamSpotRcdPrinter.tagName", "= cms.Service( \"MessageLogger\", debugModules = cms.untracked.vstring( \"*\" ), cout =", "process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1) ) process.source = cms.Source(\"EmptySource\",", "Prompt #process.BeamSpotRcdPrinter.tagName = \"BeamSpotObjects_PCL_byLumi_v0_prompt\" #process.BeamSpotRcdPrinter.startIOV = 1377280047710242 #process.BeamSpotRcdPrinter.endIOV = 1406876667347162", "cms.untracked.vstring( \"cout\" ) ) process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1)", "#process.BeamSpotRcdPrinter.startIOV = 1275820035276801 #process.BeamSpotRcdPrinter.endIOV = 1316235677532161 ### 2018 ABC ReReco", "#process.BeamSpotRcdPrinter.tagName = \"BeamSpotObjects_LumiBased_v4_offline\" #process.BeamSpotRcdPrinter.startIOV = 1275820035276801 #process.BeamSpotRcdPrinter.endIOV = 1316235677532161 ###", "\"MessageLogger\", debugModules = cms.untracked.vstring( \"*\" ), cout = cms.untracked.PSet( threshold", "= cms.untracked.PSet( input = cms.untracked.int32(1) ) process.source = cms.Source(\"EmptySource\", numberEventsInRun", "cms.Source(\"EmptySource\", 
numberEventsInRun = cms.untracked.uint32(1), firstRun = cms.untracked.uint32(1) ) process.load(\"CondCore.CondDB.CondDB_cfi\") process.load(\"CondTools.BeamSpot.BeamSpotRcdPrinter_cfi\")", "1406876667347162 process.BeamSpotRcdPrinter.output = \"summary2018_Prompt.txt\" ### 2017 ReReco #process.BeamSpotRcdPrinter.tagName = \"BeamSpotObjects_LumiBased_v4_offline\"", "1316235677532161 ### 2018 ABC ReReco #process.BeamSpotRcdPrinter.tagName = \"BeamSpotObjects_LumiBased_v4_offline\" #process.BeamSpotRcdPrinter.startIOV =", "debugModules = cms.untracked.vstring( \"*\" ), cout = cms.untracked.PSet( threshold =" ]
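A note for anyone editing the IOV ranges above: by the usual CMS conditions convention (stated here as an assumption, not taken from this file), these lumi-based IOV keys pack the run number into the upper 32 bits and the lumisection into the lower 32 bits. A small sketch to inspect the boundaries:

def unpack_lumiid(iov):
    # upper 32 bits: run number; lower 32 bits: luminosity block (assumed packing)
    return iov >> 32, iov & 0xFFFFFFFF

print(unpack_lumiid(1350646955507767))  # (run, lumi) at the start of the 2018 Prompt range
print(unpack_lumiid(1406876667347162))  # (run, lumi) at the end of the 2018 Prompt range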
[ "path from rest_framework_simplejwt.views import ( TokenObtainPairView, TokenRefreshView, TokenVerifyView ) urlpatterns", "from django.urls import path from rest_framework_simplejwt.views import ( TokenObtainPairView, TokenRefreshView,", "django.urls import path from rest_framework_simplejwt.views import ( TokenObtainPairView, TokenRefreshView, TokenVerifyView", "TokenRefreshView, TokenVerifyView ) urlpatterns = [ path('obtain/', TokenObtainPairView.as_view(), name='token_obtain_pair'), path('refresh/',", "( TokenObtainPairView, TokenRefreshView, TokenVerifyView ) urlpatterns = [ path('obtain/', TokenObtainPairView.as_view(),", ") urlpatterns = [ path('obtain/', TokenObtainPairView.as_view(), name='token_obtain_pair'), path('refresh/', TokenRefreshView.as_view(), name='token_refresh'),", "import path from rest_framework_simplejwt.views import ( TokenObtainPairView, TokenRefreshView, TokenVerifyView )", "= [ path('obtain/', TokenObtainPairView.as_view(), name='token_obtain_pair'), path('refresh/', TokenRefreshView.as_view(), name='token_refresh'), path('verify/', TokenVerifyView.as_view(),", "import ( TokenObtainPairView, TokenRefreshView, TokenVerifyView ) urlpatterns = [ path('obtain/',", "path('obtain/', TokenObtainPairView.as_view(), name='token_obtain_pair'), path('refresh/', TokenRefreshView.as_view(), name='token_refresh'), path('verify/', TokenVerifyView.as_view(), name='token_verify'), ]", "rest_framework_simplejwt.views import ( TokenObtainPairView, TokenRefreshView, TokenVerifyView ) urlpatterns = [", "TokenObtainPairView, TokenRefreshView, TokenVerifyView ) urlpatterns = [ path('obtain/', TokenObtainPairView.as_view(), name='token_obtain_pair'),", "from rest_framework_simplejwt.views import ( TokenObtainPairView, TokenRefreshView, TokenVerifyView ) urlpatterns =", "urlpatterns = [ path('obtain/', TokenObtainPairView.as_view(), name='token_obtain_pair'), path('refresh/', TokenRefreshView.as_view(), name='token_refresh'), path('verify/',", "TokenVerifyView ) urlpatterns = [ path('obtain/', TokenObtainPairView.as_view(), name='token_obtain_pair'), path('refresh/', TokenRefreshView.as_view(),", "[ path('obtain/', TokenObtainPairView.as_view(), name='token_obtain_pair'), path('refresh/', TokenRefreshView.as_view(), name='token_refresh'), path('verify/', TokenVerifyView.as_view(), name='token_verify')," ]
[ "to view available # drive letters def __init__(self, host, port,", "= 22 else: result['port'] = 21 # ftp port default", "path = utf8_fix(path) if not self.exists(path): p = self.parent( path", "= set(self.listdir(p)) return n in lst def isdir(self,path): path =", "a directory generator # which first loads the directory, then", "sys.stderr.write(\"%s\\n\"%e) super().close() def fix(self, path): return utf8_fix(path) def join(self,*args): return", "__enter__(self): return self def __exit__(self,typ,val,tb): if typ is None: self.close()", "ftp.size() fails for various reasons unless the file exists #", "ValueError(\"invalid: %s\"%url) def utf8_fix(s): return ''.join([ a if ord(a)<128 else", "and add the CRLF to the input argument using the", "sys import re reftp = re.compile('(ssh|ftp)\\:\\/\\/(([^@:]+)?:?([^@]+)?@)?([^:]+)(:[0-9]+)?\\/(.*)') def parseFTPurl( url ):", "g[2] or \"\", \"password\" : g[3] or \"\", \"hostname\" :", "\"%02X\"%ord(a) for a in s]) class FTPWriter(object): \"\"\"docstring for FTPWriter\"\"\"", "as e: sys.stderr.write(\"Error Closing FTP connection\\n\") sys.stderr.write(\"%s\\n\"%e) super().close() def fix(self,", "under other use cases. # I can't cache listdir calls,", "self.ftp.login(username,password) self.hostname = \"%s:%d\"%(host,port) def root(self): return \"/\" def close(self):", "= { \"mode\" : g[0], \"username\" : g[2] or \"\",", "and also fails whenm given a byte string. \"\"\" #", "FTP connection\\n\") sys.stderr.write(\"%s\\n\"%e) super().close() def fix(self, path): return utf8_fix(path) def", "file. I can infer that a failure to # determine", "Exception as e: print(\"ftp mkd error: %s\"%e) def split(self,path): return", "return FTPReader(self.ftp,path) raise NotImplementedError(mode) def exists(self,path): path = utf8_fix(path) p,n=posixpath.split(path)", "utf8_fix(self.path) self.ftp.retrbinary(text, self.file.write) self.file.seek(0) def read(self,n=None): return self.file.read(n) def seek(self,pos,whence=SEEK_SET):", "= utf8_fix(path) try: return self.ftp.size(path) is None except error_perm: #", "except Exception as e: print(\"ftp mkd error: %s\"%e) def split(self,path):", "path self.file = BytesIO() # open the file text =", "result['port'] = 22 else: result['port'] = 21 # ftp port", "posixpath.join(root,path) return posixpath.normpath( path ) def listdir(self,path): return self.ftp.nlst(path) def", "an accessable file. I can infer that a failure to", "def root(self): return \"/\" def close(self): try: self.ftp.quit() except all_errors", "FTP,error_perm, all_errors import posixpath from io import BytesIO,SEEK_SET from .source", "None except error_perm: # TODO: to think about more later,", "read(self,n=None): return self.file.read(n) def seek(self,pos,whence=SEEK_SET): return self.file.seek(pos,whence) def tell(self): return", "if I Already think it exists. Under the current FTP", "the size means that the path is a directory, #", "): if self.isdir(path): try: self.ftp.rmd(path) except Exception as e: print(\"ftp", "or \"\", \"port\" : int(g[5][1:]) if g[5] else 0, \"path\"", "for a in s]) class FTPWriter(object): \"\"\"docstring for FTPWriter\"\"\" def", "loops over # loaded items. # TODO: on windows we", "not self.exists(path): p = self.parent( path ) try: if not", "ftp port default return result raise ValueError(\"invalid: %s\"%url) def utf8_fix(s):", "\"/\", } if result['port'] == 0: if result['mode'] == ssh:", "exists # and is an accessable file. 
I can infer", "\"\", \"port\" : int(g[5][1:]) if g[5] else 0, \"path\" :", "import sys import re reftp = re.compile('(ssh|ftp)\\:\\/\\/(([^@:]+)?:?([^@]+)?@)?([^:]+)(:[0-9]+)?\\/(.*)') def parseFTPurl( url", "for thus file system :( try: size = self.ftp.size(path) except", "= re.compile('(ssh|ftp)\\:\\/\\/(([^@:]+)?:?([^@]+)?@)?([^:]+)(:[0-9]+)?\\/(.*)') def parseFTPurl( url ): m = reftp.match( url", "def __init__(self, ftp, path): super(FTPReader, self).__init__() self.ftp = ftp self.path", "also fails whenm given a byte string. \"\"\" # TODO:", "self.ftp.connect(host,port) self.ftp.login(username,password) self.hostname = \"%s:%d\"%(host,port) def root(self): return \"/\" def", "else 0, \"path\" : g[6] or \"/\", } if result['port']", "except error_perm: # TODO: to think about more later, #", "FTP() self.ftp.connect(host,port) self.ftp.login(username,password) self.hostname = \"%s:%d\"%(host,port) def root(self): return \"/\"", "string ? # empty string returns drives p,_ = posixpath.split(path)", "a if ord(a)<128 else \"%02X\"%ord(a) for a in s]) class", "I could # use that to determine if the file", "def mkdir(self,path): # this is a really ugly quick and", "result['port'] == 0: if result['mode'] == ssh: result['port'] = 22", "not fast for thus file system :( try: size =", "from io import BytesIO,SEEK_SET from .source import DataSource import sys", "or \"\", \"hostname\" : g[4] or \"\", \"port\" : int(g[5][1:])", "try: return self.ftp.size(path) is None except error_perm: # TODO: to", "def listdir(self,path): return self.ftp.nlst(path) def parent(self,path): # TODO: if path", "directory rmdir() path = utf8_fix(path) if self.exists( path ): if", "url ) if m: g = m.groups() result = {", "delete error: %s\"%e) else: try: self.ftp.delete(path) except Exception as e:", "self.hostname = \"%s:%d\"%(host,port) def root(self): return \"/\" def close(self): try:", "fails for various reasons unless the file exists # and", "def stat_fast(self,path): # not fast for thus file system :(", "as e: print(\"ftp delete error: %s\"%e) else: try: self.ftp.delete(path) except", "else: try: self.ftp.delete(path) except Exception as e: print(\"ftp delete error:", "directory generator # which first loads the directory, then loops", "self.ftp.mkd(path) except Exception as e: print(\"ftp mkd error: %s\"%e) def", "error: %s\"%e) def split(self,path): return posixpath.split(path) def splitext(self,path): return posixpath.splitext(path)", "the command fails when given unicode text (ord > 127)", "FTPWriter(object): \"\"\"docstring for FTPWriter\"\"\" def __init__(self, ftp, path): super(FTPWriter, self).__init__()", "self).__init__() self.ftp = ftp self.path = path self.file = BytesIO()", "if the file exists return True#self.exists( path ) def mkdir(self,path):", "first loads the directory, then loops over # loaded items.", "path self.file = BytesIO() def write(self,data): return self.file.write(data) def seek(self,pos,whence=SEEK_SET):", "g[5] else 0, \"path\" : g[6] or \"/\", } if", "return self.file.write(data) def seek(self,pos,whence=SEEK_SET): return self.file.seek(pos,whence) def tell(self): return self.file.tell()", "path): super(FTPWriter, self).__init__() self.ftp = ftp self.path = path self.file", ") if m: g = m.groups() result = { \"mode\"", "I can't cache listdir calls, but if I could, then", "FTPWriter\"\"\" def __init__(self, ftp, path): super(FTPReader, self).__init__() self.ftp = ftp", "self.close() class FTPSource(DataSource): \"\"\" there is some sort of problem", "is a really ugly quick and dirty 
solution path =", "def breakpath(self,path): return [ x for x in path.replace(\"/\",\"\\\\\").split(\"\\\\\") if", "NotImplementedError(mode) def exists(self,path): path = utf8_fix(path) p,n=posixpath.split(path) lst = set(self.listdir(p))", "for FTPWriter\"\"\" def __init__(self, ftp, path): super(FTPWriter, self).__init__() self.ftp =", "only asking if a path is a directory # if", "mode==\"wb\": return FTPWriter(self.ftp,path) elif mode==\"rb\": return FTPReader(self.ftp,path) raise NotImplementedError(mode) def", "{ \"mode\" : g[0], \"username\" : g[2] or \"\", \"password\"", "size is None, \"isLink\": False, \"mtime\" : 0, \"ctime\" :", "m.groups() result = { \"mode\" : g[0], \"username\" : g[2]", "n in lst def isdir(self,path): path = utf8_fix(path) try: return", "the file exists # and is an accessable file. I", "root and not path.startswith(\"/\"): path = posixpath.join(root,path) return posixpath.normpath( path", "if result['port'] == 0: if result['mode'] == ssh: result['port'] =", "\"\"\" there is some sort of problem with utf-8/latin-1 and", "is some sort of problem with utf-8/latin-1 and ftplib storbinary", "import posixpath from io import BytesIO,SEEK_SET from .source import DataSource", "= BytesIO() # open the file text = \"RETR \"", "solution path = utf8_fix(path) if not self.exists(path): p = self.parent(", "for x in path.replace(\"/\",\"\\\\\").split(\"\\\\\") if x ] def relpath(self,path,base): return", "all_errors as e: sys.stderr.write(\"Error Closing FTP connection\\n\") sys.stderr.write(\"%s\\n\"%e) super().close() def", "self.ftp.quit() except all_errors as e: sys.stderr.write(\"Error Closing FTP connection\\n\") sys.stderr.write(\"%s\\n\"%e)", "utf8_fix(path) try: return self.ftp.size(path) is None except error_perm: # TODO:", "command fails when given unicode text (ord > 127) and", "p,_ = posixpath.split(path) return p def move(self,oldpath,newpath): self.ftp.rename(oldpath,newpath) def delete(self,path):", "p = self.parent( path ) try: if not self.exists(p): self.ftp.mkd(", "Closing FTP connection\\n\") sys.stderr.write(\"%s\\n\"%e) super().close() def fix(self, path): return utf8_fix(path)", "result = { \"name\" : self.split(path)[1], \"size\" : size or", "size or 0, \"name\" : self.split(path)[1], \"mode\" : 0 }", "support removing directory rmdir() path = utf8_fix(path) if self.exists( path", "= self.ftp.size(path) except error_perm: size = None result = {", "0, \"ctime\" : 0, \"size\" : size or 0, \"name\"", "the input argument using the plus operator. the command fails", "if m: g = m.groups() result = { \"mode\" :", "unicode text (ord > 127) and also fails whenm given", "self.exists(p): self.ftp.mkd( p ) self.ftp.mkd(path) except Exception as e: print(\"ftp", "self.ftp.retrbinary(text, self.file.write) self.file.seek(0) def read(self,n=None): return self.file.read(n) def seek(self,pos,whence=SEEK_SET): return", "is None: self.close() class FTPReader(object): \"\"\"docstring for FTPWriter\"\"\" def __init__(self,", "stat(self,path): try: size = self.ftp.size(path) except error_perm: size = None", "import FTP,error_perm, all_errors import posixpath from io import BytesIO,SEEK_SET from", "under my use-case, I'm only asking if a path is", "CRLF to the input argument using the plus operator. the", "empty string ? 
# empty string returns drives p,_ =", "parent(self,path): # TODO: if path is C:\\\\ return empty string", "size = None result = { \"isDir\" : size is", "= path self.file = BytesIO() # open the file text", "FTPReader(object): \"\"\"docstring for FTPWriter\"\"\" def __init__(self, ftp, path): super(FTPReader, self).__init__()", "): m = reftp.match( url ) if m: g =", "impl # ftp.size() fails for various reasons unless the file", "size or 0, \"isDir\" : size is None, \"isLink\" :", "as e: print(\"ftp delete error: %s\"%e) def open(self,path,mode): if mode==\"wb\":", "the directory, then loops over # loaded items. # TODO:", "is an accessable file. I can infer that a failure", "22 else: result['port'] = 21 # ftp port default return", "then I could # use that to determine if the", "return self.ftp.size(path) is None except error_perm: # TODO: to think", "self def __exit__(self,typ,val,tb): if typ is None: self.close() class FTPSource(DataSource):", "def split(self,path): return posixpath.split(path) def splitext(self,path): return posixpath.splitext(path) def stat(self,path):", "that the path is a directory, # but this does", "BytesIO,SEEK_SET from .source import DataSource import sys import re reftp", "\" + utf8_fix(self.path) self.ftp.storbinary(text, self.file) def __enter__(self): return self def", "\"mtime\" : 0, \"ctime\" : 0, \"size\" : size or", "failure to # determine the size means that the path", "0: if result['mode'] == ssh: result['port'] = 22 else: result['port']", "p def move(self,oldpath,newpath): self.ftp.rename(oldpath,newpath) def delete(self,path): # todo support removing", "self.exists(path): p = self.parent( path ) try: if not self.exists(p):", "try: self.ftp.quit() except all_errors as e: sys.stderr.write(\"Error Closing FTP connection\\n\")", "argument using the plus operator. the command fails when given", "means that the path is a directory, # but this", "def fix(self, path): return utf8_fix(path) def join(self,*args): return posixpath.join(*args) def", "utf8_fix(self.path) self.ftp.storbinary(text, self.file) def __enter__(self): return self def __exit__(self,typ,val,tb): if", ": g[4] or \"\", \"port\" : int(g[5][1:]) if g[5] else", "must accepts a STRING, since it builds a cmd and", "= path self.file = BytesIO() def write(self,data): return self.file.write(data) def", "def __exit__(self,typ,val,tb): if typ is None: self.close() class FTPSource(DataSource): \"\"\"", "the CRLF to the input argument using the plus operator.", "listdir(self,path): return self.ftp.nlst(path) def parent(self,path): # TODO: if path is", "ftp, path): super(FTPWriter, self).__init__() self.ftp = ftp self.path = path", "# todo support removing directory rmdir() path = utf8_fix(path) if", "self.ftp = ftp self.path = path self.file = BytesIO() #", "def utf8_fix(s): return ''.join([ a if ord(a)<128 else \"%02X\"%ord(a) for", "a failure to # determine the size means that the", "use cases. 
# I can't cache listdir calls, but if", "self.ftp.nlst(path) def parent(self,path): # TODO: if path is C:\\\\ return", "if x ] def relpath(self,path,base): return posixpath.relpath(path,base) def normpath(self,path,root=None): if", "from .source import DataSource import sys import re reftp =", "# ftp.size() fails for various reasons unless the file exists", "ftplib import FTP,error_perm, all_errors import posixpath from io import BytesIO,SEEK_SET", "for various reasons unless the file exists # and is", "result def chmod(self,path,mode): print(\"chmod not implemented\") def getExportPath(self,path): return self.hostname+path", "letters def __init__(self, host, port, username=\"\", password=\"\"): super(FTPSource, self).__init__() self.ftp", "= \"%s:%d\"%(host,port) def root(self): return \"/\" def close(self): try: self.ftp.quit()", "cache listdir calls, but if I could, then I could", "delete(self,path): # todo support removing directory rmdir() path = utf8_fix(path)", "ftp self.path = path self.file = BytesIO() # open the", "utf8_fix(path) if self.exists( path ): if self.isdir(path): try: self.ftp.rmd(path) except", "string returns drives p,_ = posixpath.split(path) return p def move(self,oldpath,newpath):", "all_errors import posixpath from io import BytesIO,SEEK_SET from .source import", "whenm given a byte string. \"\"\" # TODO: turn this", ") def mkdir(self,path): # this is a really ugly quick", "I'm only asking if a path is a directory #", "ugly quick and dirty solution path = utf8_fix(path) if not", "# loaded items. # TODO: on windows we need a", "%s\"%e) def split(self,path): return posixpath.split(path) def splitext(self,path): return posixpath.splitext(path) def", "super(FTPReader, self).__init__() self.ftp = ftp self.path = path self.file =", "self.ftp = FTP() self.ftp.connect(host,port) self.ftp.login(username,password) self.hostname = \"%s:%d\"%(host,port) def root(self):", "seek(self,pos,whence=SEEK_SET): return self.file.seek(pos,whence) def tell(self): return self.file.tell() def close(self): self.file.seek(0)", "is None except error_perm: # TODO: to think about more", ": size is None, \"isLink\" : False, } return result", ": self.split(path)[1], \"size\" : size or 0, \"isDir\" : size", ": 0, \"ctime\" : 0, \"size\" : size or 0,", "url ): m = reftp.match( url ) if m: g", "if I could, then I could # use that to", "error: %s\"%e) def open(self,path,mode): if mode==\"wb\": return FTPWriter(self.ftp,path) elif mode==\"rb\":", "} return result def chmod(self,path,mode): print(\"chmod not implemented\") def getExportPath(self,path):", "# not fast for thus file system :( try: size", "thus file system :( try: size = self.ftp.size(path) except error_perm:", "\"\", \"password\" : g[3] or \"\", \"hostname\" : g[4] or", "I Already think it exists. 
Under the current FTP impl", "# but this does not hold true under other use", "error_perm: size = None result = { \"isDir\" : size", "that to determine if the file exists return True#self.exists( path", "+ utf8_fix(self.path) self.ftp.storbinary(text, self.file) def __enter__(self): return self def __exit__(self,typ,val,tb):", "return True#self.exists( path ) def mkdir(self,path): # this is a", "try: size = self.ftp.size(path) except error_perm: size = None result", ": size or 0, \"name\" : self.split(path)[1], \"mode\" : 0", "False, } return result def chmod(self,path,mode): print(\"chmod not implemented\") def", "path ) def listdir(self,path): return self.ftp.nlst(path) def parent(self,path): # TODO:", "self.ftp.storbinary(text, self.file) def __enter__(self): return self def __exit__(self,typ,val,tb): if typ", "not path.startswith(\"/\"): path = posixpath.join(root,path) return posixpath.normpath( path ) def", "import BytesIO,SEEK_SET from .source import DataSource import sys import re", "e: print(\"ftp delete error: %s\"%e) def open(self,path,mode): if mode==\"wb\": return", ": g[3] or \"\", \"hostname\" : g[4] or \"\", \"port\"", ": size is None, \"isLink\": False, \"mtime\" : 0, \"ctime\"", "False, \"mtime\" : 0, \"ctime\" : 0, \"size\" : size", "\"mode\" : 0 } return result def stat_fast(self,path): # not", "in lst def isdir(self,path): path = utf8_fix(path) try: return self.ftp.size(path)", "self.file.close() def __enter__(self): return self def __exit__(self,typ,val,tb): if typ is", "[ x for x in path.replace(\"/\",\"\\\\\").split(\"\\\\\") if x ] def", "text = \"RETR \" + utf8_fix(self.path) self.ftp.retrbinary(text, self.file.write) self.file.seek(0) def", "self.ftp.delete(path) except Exception as e: print(\"ftp delete error: %s\"%e) def", "%s\"%url) def utf8_fix(s): return ''.join([ a if ord(a)<128 else \"%02X\"%ord(a)", "def join(self,*args): return posixpath.join(*args) def breakpath(self,path): return [ x for", "(ord > 127) and also fails whenm given a byte", "None: self.close() class FTPSource(DataSource): \"\"\" there is some sort of", "path = posixpath.join(root,path) return posixpath.normpath( path ) def listdir(self,path): return", "return self.file.tell() def close(self): self.file.close() def __enter__(self): return self def", "can infer that a failure to # determine the size", "unless the file exists # and is an accessable file.", "into a directory generator # which first loads the directory,", "def close(self): self.file.close() def __enter__(self): return self def __exit__(self,typ,val,tb): if", "FTP impl # ftp.size() fails for various reasons unless the", "super(FTPWriter, self).__init__() self.ftp = ftp self.path = path self.file =", "def __init__(self, host, port, username=\"\", password=\"\"): super(FTPSource, self).__init__() self.ftp =", "# TODO: to think about more later, # under my", "posixpath.split(path) def splitext(self,path): return posixpath.splitext(path) def stat(self,path): try: size =", "think it exists. 
Under the current FTP impl # ftp.size()", "error: %s\"%e) else: try: self.ftp.delete(path) except Exception as e: print(\"ftp", "tell(self): return self.file.tell() def close(self): self.file.seek(0) text = \"STOR \"", "open the file text = \"RETR \" + utf8_fix(self.path) self.ftp.retrbinary(text,", "g[4] or \"\", \"port\" : int(g[5][1:]) if g[5] else 0,", "some sort of problem with utf-8/latin-1 and ftplib storbinary must", "for FTPWriter\"\"\" def __init__(self, ftp, path): super(FTPReader, self).__init__() self.ftp =", "= { \"isDir\" : size is None, \"isLink\": False, \"mtime\"", "= BytesIO() def write(self,data): return self.file.write(data) def seek(self,pos,whence=SEEK_SET): return self.file.seek(pos,whence)", "is None, \"isLink\" : False, } return result def chmod(self,path,mode):", "= None result = { \"name\" : self.split(path)[1], \"size\" :", ") self.ftp.mkd(path) except Exception as e: print(\"ftp mkd error: %s\"%e)", "I can infer that a failure to # determine the", "} if result['port'] == 0: if result['mode'] == ssh: result['port']", "exists. Under the current FTP impl # ftp.size() fails for", "def exists(self,path): path = utf8_fix(path) p,n=posixpath.split(path) lst = set(self.listdir(p)) return", "= utf8_fix(path) if not self.exists(path): p = self.parent( path )", "self.ftp = ftp self.path = path self.file = BytesIO() def", "print(\"ftp delete error: %s\"%e) def open(self,path,mode): if mode==\"wb\": return FTPWriter(self.ftp,path)", "sys.stderr.write(\"Error Closing FTP connection\\n\") sys.stderr.write(\"%s\\n\"%e) super().close() def fix(self, path): return", "STRING, since it builds a cmd and add the CRLF", "removing directory rmdir() path = utf8_fix(path) if self.exists( path ):", "if ord(a)<128 else \"%02X\"%ord(a) for a in s]) class FTPWriter(object):", "{ \"isDir\" : size is None, \"isLink\": False, \"mtime\" :", "directory # if I Already think it exists. Under the", "lst def isdir(self,path): path = utf8_fix(path) try: return self.ftp.size(path) is", "return self.file.tell() def close(self): self.file.seek(0) text = \"STOR \" +", "true under other use cases. # I can't cache listdir", "__init__(self, ftp, path): super(FTPReader, self).__init__() self.ftp = ftp self.path =", "way to view available # drive letters def __init__(self, host,", "def read(self,n=None): return self.file.read(n) def seek(self,pos,whence=SEEK_SET): return self.file.seek(pos,whence) def tell(self):", "try: self.ftp.delete(path) except Exception as e: print(\"ftp delete error: %s\"%e)", "= utf8_fix(path) if self.exists( path ): if self.isdir(path): try: self.ftp.rmd(path)", "cases. # I can't cache listdir calls, but if I", "parseFTPurl( url ): m = reftp.match( url ) if m:", "which first loads the directory, then loops over # loaded", "# empty string returns drives p,_ = posixpath.split(path) return p", "over # loaded items. # TODO: on windows we need", "\"path\" : g[6] or \"/\", } if result['port'] == 0:", "FTPWriter\"\"\" def __init__(self, ftp, path): super(FTPWriter, self).__init__() self.ftp = ftp", "path is a directory # if I Already think it", "if not self.exists(path): p = self.parent( path ) try: if", "# and is an accessable file. 
I can infer that", "determine if the file exists return True#self.exists( path ) def", "s]) class FTPWriter(object): \"\"\"docstring for FTPWriter\"\"\" def __init__(self, ftp, path):", "super(FTPSource, self).__init__() self.ftp = FTP() self.ftp.connect(host,port) self.ftp.login(username,password) self.hostname = \"%s:%d\"%(host,port)", "path ) def mkdir(self,path): # this is a really ugly", "a in s]) class FTPWriter(object): \"\"\"docstring for FTPWriter\"\"\" def __init__(self,", "ord(a)<128 else \"%02X\"%ord(a) for a in s]) class FTPWriter(object): \"\"\"docstring", "def __exit__(self,typ,val,tb): if typ is None: self.close() class FTPReader(object): \"\"\"docstring", "g[3] or \"\", \"hostname\" : g[4] or \"\", \"port\" :", "except error_perm: size = None result = { \"name\" :", "return empty string ? # empty string returns drives p,_", "path = utf8_fix(path) try: return self.ftp.size(path) is None except error_perm:", "path is a directory, # but this does not hold", "e: print(\"ftp mkd error: %s\"%e) def split(self,path): return posixpath.split(path) def", "file exists # and is an accessable file. I can", "the path is a directory, # but this does not", "if mode==\"wb\": return FTPWriter(self.ftp,path) elif mode==\"rb\": return FTPReader(self.ftp,path) raise NotImplementedError(mode)", "since it builds a cmd and add the CRLF to", "\"mode\" : g[0], \"username\" : g[2] or \"\", \"password\" :", "\"name\" : self.split(path)[1], \"mode\" : 0 } return result def", "reftp.match( url ) if m: g = m.groups() result =", "class FTPReader(object): \"\"\"docstring for FTPWriter\"\"\" def __init__(self, ftp, path): super(FTPReader,", "the file exists return True#self.exists( path ) def mkdir(self,path): #", "def tell(self): return self.file.tell() def close(self): self.file.close() def __enter__(self): return", "x ] def relpath(self,path,base): return posixpath.relpath(path,base) def normpath(self,path,root=None): if root", "C:\\\\ return empty string ? # empty string returns drives", "Under the current FTP impl # ftp.size() fails for various", "typ is None: self.close() class FTPReader(object): \"\"\"docstring for FTPWriter\"\"\" def", "split(self,path): return posixpath.split(path) def splitext(self,path): return posixpath.splitext(path) def stat(self,path): try:", "= \"RETR \" + utf8_fix(self.path) self.ftp.retrbinary(text, self.file.write) self.file.seek(0) def read(self,n=None):", "self.ftp.mkd( p ) self.ftp.mkd(path) except Exception as e: print(\"ftp mkd", "a path is a directory # if I Already think", ": int(g[5][1:]) if g[5] else 0, \"path\" : g[6] or", "but if I could, then I could # use that", "available # drive letters def __init__(self, host, port, username=\"\", password=\"\"):", "return posixpath.splitext(path) def stat(self,path): try: size = self.ftp.size(path) except error_perm:", "def stat(self,path): try: size = self.ftp.size(path) except error_perm: size =", "if path is C:\\\\ return empty string ? 
# empty", "TODO: to think about more later, # under my use-case,", "connection\\n\") sys.stderr.write(\"%s\\n\"%e) super().close() def fix(self, path): return utf8_fix(path) def join(self,*args):", "e: sys.stderr.write(\"Error Closing FTP connection\\n\") sys.stderr.write(\"%s\\n\"%e) super().close() def fix(self, path):", "try: self.ftp.rmd(path) except Exception as e: print(\"ftp delete error: %s\"%e)", "class FTPWriter(object): \"\"\"docstring for FTPWriter\"\"\" def __init__(self, ftp, path): super(FTPWriter,", "seek(self,pos,whence=SEEK_SET): return self.file.seek(pos,whence) def tell(self): return self.file.tell() def close(self): self.file.close()", "given unicode text (ord > 127) and also fails whenm", "self.ftp.size(path) except error_perm: size = None result = { \"isDir\"", "is a directory # if I Already think it exists.", "return self def __exit__(self,typ,val,tb): if typ is None: self.close() class", "posixpath.relpath(path,base) def normpath(self,path,root=None): if root and not path.startswith(\"/\"): path =", "set(self.listdir(p)) return n in lst def isdir(self,path): path = utf8_fix(path)", "if self.isdir(path): try: self.ftp.rmd(path) except Exception as e: print(\"ftp delete", "plus operator. the command fails when given unicode text (ord", "host, port, username=\"\", password=\"\"): super(FTPSource, self).__init__() self.ftp = FTP() self.ftp.connect(host,port)", "= FTP() self.ftp.connect(host,port) self.ftp.login(username,password) self.hostname = \"%s:%d\"%(host,port) def root(self): return", "%s\"%e) else: try: self.ftp.delete(path) except Exception as e: print(\"ftp delete", "view available # drive letters def __init__(self, host, port, username=\"\",", "raise NotImplementedError(mode) def exists(self,path): path = utf8_fix(path) p,n=posixpath.split(path) lst =", "use-case, I'm only asking if a path is a directory", "if g[5] else 0, \"path\" : g[6] or \"/\", }", "def normpath(self,path,root=None): if root and not path.startswith(\"/\"): path = posixpath.join(root,path)", "is a directory, # but this does not hold true", "def parseFTPurl( url ): m = reftp.match( url ) if", "items. # TODO: on windows we need a way to", "later, # under my use-case, I'm only asking if a", "is C:\\\\ return empty string ? # empty string returns", "class FTPSource(DataSource): \"\"\" there is some sort of problem with", "TODO: if path is C:\\\\ return empty string ? #", "fix(self, path): return utf8_fix(path) def join(self,*args): return posixpath.join(*args) def breakpath(self,path):", "close(self): self.file.seek(0) text = \"STOR \" + utf8_fix(self.path) self.ftp.storbinary(text, self.file)", "self.file.write) self.file.seek(0) def read(self,n=None): return self.file.read(n) def seek(self,pos,whence=SEEK_SET): return self.file.seek(pos,whence)", "g[0], \"username\" : g[2] or \"\", \"password\" : g[3] or", "= reftp.match( url ) if m: g = m.groups() result", "posixpath.normpath( path ) def listdir(self,path): return self.ftp.nlst(path) def parent(self,path): #", "= None result = { \"isDir\" : size is None,", "directory, # but this does not hold true under other", "accessable file. 
I can infer that a failure to #", "ftplib storbinary must accepts a STRING, since it builds a", "there is some sort of problem with utf-8/latin-1 and ftplib", "if typ is None: self.close() class FTPReader(object): \"\"\"docstring for FTPWriter\"\"\"", "%s\"%e) def open(self,path,mode): if mode==\"wb\": return FTPWriter(self.ftp,path) elif mode==\"rb\": return", "else \"%02X\"%ord(a) for a in s]) class FTPWriter(object): \"\"\"docstring for", "self).__init__() self.ftp = FTP() self.ftp.connect(host,port) self.ftp.login(username,password) self.hostname = \"%s:%d\"%(host,port) def", "Exception as e: print(\"ftp delete error: %s\"%e) else: try: self.ftp.delete(path)", "to # determine the size means that the path is", "= \"STOR \" + utf8_fix(self.path) self.ftp.storbinary(text, self.file) def __enter__(self): return", "breakpath(self,path): return [ x for x in path.replace(\"/\",\"\\\\\").split(\"\\\\\") if x", "<filename>yue/core/explorer/ftpsource.py from ftplib import FTP,error_perm, all_errors import posixpath from io", "more later, # under my use-case, I'm only asking if", "utf8_fix(path) def join(self,*args): return posixpath.join(*args) def breakpath(self,path): return [ x", "file system :( try: size = self.ftp.size(path) except error_perm: size", "self def __exit__(self,typ,val,tb): if typ is None: self.close() class FTPReader(object):", "\"size\" : size or 0, \"isDir\" : size is None,", "= posixpath.split(path) return p def move(self,oldpath,newpath): self.ftp.rename(oldpath,newpath) def delete(self,path): #", "could # use that to determine if the file exists", "hold true under other use cases. # I can't cache", "= 21 # ftp port default return result raise ValueError(\"invalid:", "other use cases. # I can't cache listdir calls, but", "given a byte string. \"\"\" # TODO: turn this into", "not hold true under other use cases. # I can't", "quick and dirty solution path = utf8_fix(path) if not self.exists(path):", "\"\"\" # TODO: turn this into a directory generator #", ": False, } return result def chmod(self,path,mode): print(\"chmod not implemented\")", "to think about more later, # under my use-case, I'm", "return n in lst def isdir(self,path): path = utf8_fix(path) try:", "Already think it exists. Under the current FTP impl #", "self.split(path)[1], \"mode\" : 0 } return result def stat_fast(self,path): #", "self.file.seek(0) text = \"STOR \" + utf8_fix(self.path) self.ftp.storbinary(text, self.file) def", "return FTPWriter(self.ftp,path) elif mode==\"rb\": return FTPReader(self.ftp,path) raise NotImplementedError(mode) def exists(self,path):", "\"isLink\": False, \"mtime\" : 0, \"ctime\" : 0, \"size\" :", ": 0 } return result def stat_fast(self,path): # not fast", "None, \"isLink\": False, \"mtime\" : 0, \"ctime\" : 0, \"size\"", "try: if not self.exists(p): self.ftp.mkd( p ) self.ftp.mkd(path) except Exception", "return result def stat_fast(self,path): # not fast for thus file", "self.ftp.size(path) is None except error_perm: # TODO: to think about", "a directory # if I Already think it exists. Under", "a cmd and add the CRLF to the input argument", "result['mode'] == ssh: result['port'] = 22 else: result['port'] = 21", "self.file.tell() def close(self): self.file.close() def __enter__(self): return self def __exit__(self,typ,val,tb):", "the plus operator. the command fails when given unicode text", "loads the directory, then loops over # loaded items. 
#", "self.file.seek(pos,whence) def tell(self): return self.file.tell() def close(self): self.file.seek(0) text =", "username=\"\", password=\"\"): super(FTPSource, self).__init__() self.ftp = FTP() self.ftp.connect(host,port) self.ftp.login(username,password) self.hostname", "a really ugly quick and dirty solution path = utf8_fix(path)", "re.compile('(ssh|ftp)\\:\\/\\/(([^@:]+)?:?([^@]+)?@)?([^:]+)(:[0-9]+)?\\/(.*)') def parseFTPurl( url ): m = reftp.match( url )", "return ''.join([ a if ord(a)<128 else \"%02X\"%ord(a) for a in", "the file text = \"RETR \" + utf8_fix(self.path) self.ftp.retrbinary(text, self.file.write)", "True#self.exists( path ) def mkdir(self,path): # this is a really", "\"\"\"docstring for FTPWriter\"\"\" def __init__(self, ftp, path): super(FTPWriter, self).__init__() self.ftp", "fast for thus file system :( try: size = self.ftp.size(path)", "fails when given unicode text (ord > 127) and also", "None result = { \"name\" : self.split(path)[1], \"size\" : size", "g[6] or \"/\", } if result['port'] == 0: if result['mode']", "elif mode==\"rb\": return FTPReader(self.ftp,path) raise NotImplementedError(mode) def exists(self,path): path =", "operator. the command fails when given unicode text (ord >", "self.isdir(path): try: self.ftp.rmd(path) except Exception as e: print(\"ftp delete error:", "drives p,_ = posixpath.split(path) return p def move(self,oldpath,newpath): self.ftp.rename(oldpath,newpath) def", "path = utf8_fix(path) p,n=posixpath.split(path) lst = set(self.listdir(p)) return n in", "close(self): self.file.close() def __enter__(self): return self def __exit__(self,typ,val,tb): if typ", "BytesIO() def write(self,data): return self.file.write(data) def seek(self,pos,whence=SEEK_SET): return self.file.seek(pos,whence) def", "ftp self.path = path self.file = BytesIO() def write(self,data): return", "text (ord > 127) and also fails whenm given a", "drive letters def __init__(self, host, port, username=\"\", password=\"\"): super(FTPSource, self).__init__()", "various reasons unless the file exists # and is an", "size means that the path is a directory, # but", "and dirty solution path = utf8_fix(path) if not self.exists(path): p", "FTPSource(DataSource): \"\"\" there is some sort of problem with utf-8/latin-1", "# TODO: on windows we need a way to view", "calls, but if I could, then I could # use", "size is None, \"isLink\" : False, } return result def", "root(self): return \"/\" def close(self): try: self.ftp.quit() except all_errors as", "returns drives p,_ = posixpath.split(path) return p def move(self,oldpath,newpath): self.ftp.rename(oldpath,newpath)", "text = \"STOR \" + utf8_fix(self.path) self.ftp.storbinary(text, self.file) def __enter__(self):", "or \"\", \"password\" : g[3] or \"\", \"hostname\" : g[4]", "return result raise ValueError(\"invalid: %s\"%url) def utf8_fix(s): return ''.join([ a", "FTPReader(self.ftp,path) raise NotImplementedError(mode) def exists(self,path): path = utf8_fix(path) p,n=posixpath.split(path) lst", "current FTP impl # ftp.size() fails for various reasons unless", "self.ftp.rename(oldpath,newpath) def delete(self,path): # todo support removing directory rmdir() path", "m: g = m.groups() result = { \"mode\" : g[0],", "loaded items. 
# TODO: on windows we need a way", "as e: print(\"ftp mkd error: %s\"%e) def split(self,path): return posixpath.split(path)", "mkd error: %s\"%e) def split(self,path): return posixpath.split(path) def splitext(self,path): return", "I could, then I could # use that to determine", "dirty solution path = utf8_fix(path) if not self.exists(path): p =", "return self.file.seek(pos,whence) def tell(self): return self.file.tell() def close(self): self.file.close() def", "todo support removing directory rmdir() path = utf8_fix(path) if self.exists(", "self.close() class FTPReader(object): \"\"\"docstring for FTPWriter\"\"\" def __init__(self, ftp, path):", "> 127) and also fails whenm given a byte string.", "TODO: on windows we need a way to view available", "0, \"isDir\" : size is None, \"isLink\" : False, }", "TODO: turn this into a directory generator # which first", "result def stat_fast(self,path): # not fast for thus file system", "system :( try: size = self.ftp.size(path) except error_perm: size =", "ftp, path): super(FTPReader, self).__init__() self.ftp = ftp self.path = path", "that a failure to # determine the size means that", "__exit__(self,typ,val,tb): if typ is None: self.close() class FTPReader(object): \"\"\"docstring for", "21 # ftp port default return result raise ValueError(\"invalid: %s\"%url)", "normpath(self,path,root=None): if root and not path.startswith(\"/\"): path = posixpath.join(root,path) return", "None result = { \"isDir\" : size is None, \"isLink\":", ": g[6] or \"/\", } if result['port'] == 0: if", "= m.groups() result = { \"mode\" : g[0], \"username\" :", "0, \"size\" : size or 0, \"name\" : self.split(path)[1], \"mode\"", "re reftp = re.compile('(ssh|ftp)\\:\\/\\/(([^@:]+)?:?([^@]+)?@)?([^:]+)(:[0-9]+)?\\/(.*)') def parseFTPurl( url ): m =", "my use-case, I'm only asking if a path is a", "+ utf8_fix(self.path) self.ftp.retrbinary(text, self.file.write) self.file.seek(0) def read(self,n=None): return self.file.read(n) def", "self.file.seek(0) def read(self,n=None): return self.file.read(n) def seek(self,pos,whence=SEEK_SET): return self.file.seek(pos,whence) def", "builds a cmd and add the CRLF to the input", "= utf8_fix(path) p,n=posixpath.split(path) lst = set(self.listdir(p)) return n in lst", "add the CRLF to the input argument using the plus", "else: result['port'] = 21 # ftp port default return result", "posixpath from io import BytesIO,SEEK_SET from .source import DataSource import", "lst = set(self.listdir(p)) return n in lst def isdir(self,path): path", "size = self.ftp.size(path) except error_perm: size = None result =", "print(\"ftp delete error: %s\"%e) else: try: self.ftp.delete(path) except Exception as", "or 0, \"isDir\" : size is None, \"isLink\" : False,", "p,n=posixpath.split(path) lst = set(self.listdir(p)) return n in lst def isdir(self,path):", "path): super(FTPReader, self).__init__() self.ftp = ftp self.path = path self.file", "def relpath(self,path,base): return posixpath.relpath(path,base) def normpath(self,path,root=None): if root and not", "e: print(\"ftp delete error: %s\"%e) else: try: self.ftp.delete(path) except Exception", "a byte string. \"\"\" # TODO: turn this into a", "\"/\" def close(self): try: self.ftp.quit() except all_errors as e: sys.stderr.write(\"Error", "# I can't cache listdir calls, but if I could,", "result = { \"mode\" : g[0], \"username\" : g[2] or", "def __init__(self, ftp, path): super(FTPWriter, self).__init__() self.ftp = ftp self.path", "then loops over # loaded items. 
# TODO: on windows", "close(self): try: self.ftp.quit() except all_errors as e: sys.stderr.write(\"Error Closing FTP", "return posixpath.relpath(path,base) def normpath(self,path,root=None): if root and not path.startswith(\"/\"): path", "raise ValueError(\"invalid: %s\"%url) def utf8_fix(s): return ''.join([ a if ord(a)<128", "but this does not hold true under other use cases.", "return self.file.seek(pos,whence) def tell(self): return self.file.tell() def close(self): self.file.seek(0) text", "\"username\" : g[2] or \"\", \"password\" : g[3] or \"\",", "\"\", \"hostname\" : g[4] or \"\", \"port\" : int(g[5][1:]) if", "with utf-8/latin-1 and ftplib storbinary must accepts a STRING, since", "x in path.replace(\"/\",\"\\\\\").split(\"\\\\\") if x ] def relpath(self,path,base): return posixpath.relpath(path,base)", "typ is None: self.close() class FTPSource(DataSource): \"\"\" there is some", "a STRING, since it builds a cmd and add the", "return utf8_fix(path) def join(self,*args): return posixpath.join(*args) def breakpath(self,path): return [", "def move(self,oldpath,newpath): self.ftp.rename(oldpath,newpath) def delete(self,path): # todo support removing directory", "to determine if the file exists return True#self.exists( path )", "def parent(self,path): # TODO: if path is C:\\\\ return empty", "string. \"\"\" # TODO: turn this into a directory generator", "turn this into a directory generator # which first loads", "in path.replace(\"/\",\"\\\\\").split(\"\\\\\") if x ] def relpath(self,path,base): return posixpath.relpath(path,base) def", "and not path.startswith(\"/\"): path = posixpath.join(root,path) return posixpath.normpath( path )", "return self.file.read(n) def seek(self,pos,whence=SEEK_SET): return self.file.seek(pos,whence) def tell(self): return self.file.tell()", "size = None result = { \"name\" : self.split(path)[1], \"size\"", "\"password\" : g[3] or \"\", \"hostname\" : g[4] or \"\",", "self.split(path)[1], \"size\" : size or 0, \"isDir\" : size is", "def write(self,data): return self.file.write(data) def seek(self,pos,whence=SEEK_SET): return self.file.seek(pos,whence) def tell(self):", "join(self,*args): return posixpath.join(*args) def breakpath(self,path): return [ x for x", "error_perm: # TODO: to think about more later, # under", "int(g[5][1:]) if g[5] else 0, \"path\" : g[6] or \"/\",", "except Exception as e: print(\"ftp delete error: %s\"%e) def open(self,path,mode):", "byte string. \"\"\" # TODO: turn this into a directory", "if a path is a directory # if I Already", "sort of problem with utf-8/latin-1 and ftplib storbinary must accepts", "self.file) def __enter__(self): return self def __exit__(self,typ,val,tb): if typ is", "using the plus operator. the command fails when given unicode", "result = { \"isDir\" : size is None, \"isLink\": False,", "\"\"\"docstring for FTPWriter\"\"\" def __init__(self, ftp, path): super(FTPReader, self).__init__() self.ftp", "posixpath.join(*args) def breakpath(self,path): return [ x for x in path.replace(\"/\",\"\\\\\").split(\"\\\\\")", "def tell(self): return self.file.tell() def close(self): self.file.seek(0) text = \"STOR", "} return result def stat_fast(self,path): # not fast for thus", "directory, then loops over # loaded items. 
# TODO: on", "use that to determine if the file exists return True#self.exists(", "\"name\" : self.split(path)[1], \"size\" : size or 0, \"isDir\" :", ": size or 0, \"isDir\" : size is None, \"isLink\"", "\"isLink\" : False, } return result def chmod(self,path,mode): print(\"chmod not", "self.file.write(data) def seek(self,pos,whence=SEEK_SET): return self.file.seek(pos,whence) def tell(self): return self.file.tell() def", "return [ x for x in path.replace(\"/\",\"\\\\\").split(\"\\\\\") if x ]", "return posixpath.join(*args) def breakpath(self,path): return [ x for x in", "# determine the size means that the path is a", "file exists return True#self.exists( path ) def mkdir(self,path): # this", "path): return utf8_fix(path) def join(self,*args): return posixpath.join(*args) def breakpath(self,path): return", "delete error: %s\"%e) def open(self,path,mode): if mode==\"wb\": return FTPWriter(self.ftp,path) elif", "it exists. Under the current FTP impl # ftp.size() fails", "self.path = path self.file = BytesIO() # open the file", "= { \"name\" : self.split(path)[1], \"size\" : size or 0,", "this into a directory generator # which first loads the", "self.exists( path ): if self.isdir(path): try: self.ftp.rmd(path) except Exception as", "of problem with utf-8/latin-1 and ftplib storbinary must accepts a", "__exit__(self,typ,val,tb): if typ is None: self.close() class FTPSource(DataSource): \"\"\" there", "self.ftp.rmd(path) except Exception as e: print(\"ftp delete error: %s\"%e) else:", "default return result raise ValueError(\"invalid: %s\"%url) def utf8_fix(s): return ''.join([", "self.ftp.size(path) except error_perm: size = None result = { \"name\"", "path = utf8_fix(path) if self.exists( path ): if self.isdir(path): try:", "127) and also fails whenm given a byte string. \"\"\"", "path ): if self.isdir(path): try: self.ftp.rmd(path) except Exception as e:", "if not self.exists(p): self.ftp.mkd( p ) self.ftp.mkd(path) except Exception as", "0, \"name\" : self.split(path)[1], \"mode\" : 0 } return result", "cmd and add the CRLF to the input argument using", "None: self.close() class FTPReader(object): \"\"\"docstring for FTPWriter\"\"\" def __init__(self, ftp,", "utf8_fix(s): return ''.join([ a if ord(a)<128 else \"%02X\"%ord(a) for a", "this is a really ugly quick and dirty solution path", "# open the file text = \"RETR \" + utf8_fix(self.path)", "write(self,data): return self.file.write(data) def seek(self,pos,whence=SEEK_SET): return self.file.seek(pos,whence) def tell(self): return", "and is an accessable file. 
I can infer that a", ") try: if not self.exists(p): self.ftp.mkd( p ) self.ftp.mkd(path) except", "determine the size means that the path is a directory,", "on windows we need a way to view available #", "x for x in path.replace(\"/\",\"\\\\\").split(\"\\\\\") if x ] def relpath(self,path,base):", "we need a way to view available # drive letters", "exists(self,path): path = utf8_fix(path) p,n=posixpath.split(path) lst = set(self.listdir(p)) return n", "posixpath.splitext(path) def stat(self,path): try: size = self.ftp.size(path) except error_perm: size", "# TODO: if path is C:\\\\ return empty string ?", "isdir(self,path): path = utf8_fix(path) try: return self.ftp.size(path) is None except", "FTPWriter(self.ftp,path) elif mode==\"rb\": return FTPReader(self.ftp,path) raise NotImplementedError(mode) def exists(self,path): path", "def splitext(self,path): return posixpath.splitext(path) def stat(self,path): try: size = self.ftp.size(path)", "= posixpath.join(root,path) return posixpath.normpath( path ) def listdir(self,path): return self.ftp.nlst(path)", "utf8_fix(path) if not self.exists(path): p = self.parent( path ) try:", "password=\"\"): super(FTPSource, self).__init__() self.ftp = FTP() self.ftp.connect(host,port) self.ftp.login(username,password) self.hostname =", "port, username=\"\", password=\"\"): super(FTPSource, self).__init__() self.ftp = FTP() self.ftp.connect(host,port) self.ftp.login(username,password)", "def close(self): self.file.seek(0) text = \"STOR \" + utf8_fix(self.path) self.ftp.storbinary(text,", "return posixpath.split(path) def splitext(self,path): return posixpath.splitext(path) def stat(self,path): try: size", "if self.exists( path ): if self.isdir(path): try: self.ftp.rmd(path) except Exception", "# use that to determine if the file exists return", "self.file.seek(pos,whence) def tell(self): return self.file.tell() def close(self): self.file.close() def __enter__(self):", "a directory, # but this does not hold true under", "Exception as e: print(\"ftp delete error: %s\"%e) def open(self,path,mode): if", "rmdir() path = utf8_fix(path) if self.exists( path ): if self.isdir(path):", "return result def chmod(self,path,mode): print(\"chmod not implemented\") def getExportPath(self,path): return", "= self.parent( path ) try: if not self.exists(p): self.ftp.mkd( p", "move(self,oldpath,newpath): self.ftp.rename(oldpath,newpath) def delete(self,path): # todo support removing directory rmdir()", "m = reftp.match( url ) if m: g = m.groups()", "from ftplib import FTP,error_perm, all_errors import posixpath from io import", "utf8_fix(path) p,n=posixpath.split(path) lst = set(self.listdir(p)) return n in lst def", "the current FTP impl # ftp.size() fails for various reasons", "error_perm: size = None result = { \"name\" : self.split(path)[1],", "\"isDir\" : size is None, \"isLink\": False, \"mtime\" : 0,", "is None, \"isLink\": False, \"mtime\" : 0, \"ctime\" : 0,", "utf-8/latin-1 and ftplib storbinary must accepts a STRING, since it", "0, \"path\" : g[6] or \"/\", } if result['port'] ==", "return self.ftp.nlst(path) def parent(self,path): # TODO: if path is C:\\\\", "if typ is None: self.close() class FTPSource(DataSource): \"\"\" there is", "result raise ValueError(\"invalid: %s\"%url) def utf8_fix(s): return ''.join([ a if", "] def relpath(self,path,base): return posixpath.relpath(path,base) def normpath(self,path,root=None): if root and", ".source import DataSource import sys import re reftp = 
re.compile('(ssh|ftp)\\:\\/\\/(([^@:]+)?:?([^@]+)?@)?([^:]+)(:[0-9]+)?\\/(.*)')", "# if I Already think it exists. Under the current", "path is C:\\\\ return empty string ? # empty string", "return p def move(self,oldpath,newpath): self.ftp.rename(oldpath,newpath) def delete(self,path): # todo support", ": self.split(path)[1], \"mode\" : 0 } return result def stat_fast(self,path):", "path.replace(\"/\",\"\\\\\").split(\"\\\\\") if x ] def relpath(self,path,base): return posixpath.relpath(path,base) def normpath(self,path,root=None):", "return \"/\" def close(self): try: self.ftp.quit() except all_errors as e:", "def open(self,path,mode): if mode==\"wb\": return FTPWriter(self.ftp,path) elif mode==\"rb\": return FTPReader(self.ftp,path)", "stat_fast(self,path): # not fast for thus file system :( try:", "mkdir(self,path): # this is a really ugly quick and dirty", "reftp = re.compile('(ssh|ftp)\\:\\/\\/(([^@:]+)?:?([^@]+)?@)?([^:]+)(:[0-9]+)?\\/(.*)') def parseFTPurl( url ): m = reftp.match(", "__init__(self, ftp, path): super(FTPWriter, self).__init__() self.ftp = ftp self.path =", "except all_errors as e: sys.stderr.write(\"Error Closing FTP connection\\n\") sys.stderr.write(\"%s\\n\"%e) super().close()", "self.parent( path ) try: if not self.exists(p): self.ftp.mkd( p )", "\"RETR \" + utf8_fix(self.path) self.ftp.retrbinary(text, self.file.write) self.file.seek(0) def read(self,n=None): return", "or \"/\", } if result['port'] == 0: if result['mode'] ==", "about more later, # under my use-case, I'm only asking", "open(self,path,mode): if mode==\"wb\": return FTPWriter(self.ftp,path) elif mode==\"rb\": return FTPReader(self.ftp,path) raise", "need a way to view available # drive letters def", "asking if a path is a directory # if I", "and ftplib storbinary must accepts a STRING, since it builds", "__init__(self, host, port, username=\"\", password=\"\"): super(FTPSource, self).__init__() self.ftp = FTP()", "path ) try: if not self.exists(p): self.ftp.mkd( p ) self.ftp.mkd(path)", "file text = \"RETR \" + utf8_fix(self.path) self.ftp.retrbinary(text, self.file.write) self.file.seek(0)", "# TODO: turn this into a directory generator # which", "\"isDir\" : size is None, \"isLink\" : False, } return", "infer that a failure to # determine the size means", "def isdir(self,path): path = utf8_fix(path) try: return self.ftp.size(path) is None", "input argument using the plus operator. the command fails when", "self.file = BytesIO() def write(self,data): return self.file.write(data) def seek(self,pos,whence=SEEK_SET): return", "when given unicode text (ord > 127) and also fails", "storbinary must accepts a STRING, since it builds a cmd", "self.path = path self.file = BytesIO() def write(self,data): return self.file.write(data)", "problem with utf-8/latin-1 and ftplib storbinary must accepts a STRING,", "mode==\"rb\": return FTPReader(self.ftp,path) raise NotImplementedError(mode) def exists(self,path): path = utf8_fix(path)", "# this is a really ugly quick and dirty solution", "or 0, \"name\" : self.split(path)[1], \"mode\" : 0 } return", "this does not hold true under other use cases. 
#", "self.file.tell() def close(self): self.file.seek(0) text = \"STOR \" + utf8_fix(self.path)", "''.join([ a if ord(a)<128 else \"%02X\"%ord(a) for a in s])", "# under my use-case, I'm only asking if a path", ": 0, \"size\" : size or 0, \"name\" : self.split(path)[1],", "None, \"isLink\" : False, } return result def chmod(self,path,mode): print(\"chmod", "return posixpath.normpath( path ) def listdir(self,path): return self.ftp.nlst(path) def parent(self,path):", "io import BytesIO,SEEK_SET from .source import DataSource import sys import", ": g[2] or \"\", \"password\" : g[3] or \"\", \"hostname\"", ":( try: size = self.ftp.size(path) except error_perm: size = None", "import DataSource import sys import re reftp = re.compile('(ssh|ftp)\\:\\/\\/(([^@:]+)?:?([^@]+)?@)?([^:]+)(:[0-9]+)?\\/(.*)') def", "a way to view available # drive letters def __init__(self,", "relpath(self,path,base): return posixpath.relpath(path,base) def normpath(self,path,root=None): if root and not path.startswith(\"/\"):", "= ftp self.path = path self.file = BytesIO() def write(self,data):", "listdir calls, but if I could, then I could #", "def delete(self,path): # todo support removing directory rmdir() path =", "windows we need a way to view available # drive", "ssh: result['port'] = 22 else: result['port'] = 21 # ftp", "= ftp self.path = path self.file = BytesIO() # open", "not self.exists(p): self.ftp.mkd( p ) self.ftp.mkd(path) except Exception as e:", "def close(self): try: self.ftp.quit() except all_errors as e: sys.stderr.write(\"Error Closing", "splitext(self,path): return posixpath.splitext(path) def stat(self,path): try: size = self.ftp.size(path) except", "\" + utf8_fix(self.path) self.ftp.retrbinary(text, self.file.write) self.file.seek(0) def read(self,n=None): return self.file.read(n)", "it builds a cmd and add the CRLF to the", "in s]) class FTPWriter(object): \"\"\"docstring for FTPWriter\"\"\" def __init__(self, ftp,", "\"ctime\" : 0, \"size\" : size or 0, \"name\" :", "port default return result raise ValueError(\"invalid: %s\"%url) def utf8_fix(s): return", "posixpath.split(path) return p def move(self,oldpath,newpath): self.ftp.rename(oldpath,newpath) def delete(self,path): # todo", "self.file = BytesIO() # open the file text = \"RETR", "generator # which first loads the directory, then loops over", "if result['mode'] == ssh: result['port'] = 22 else: result['port'] =", "empty string returns drives p,_ = posixpath.split(path) return p def", "think about more later, # under my use-case, I'm only", "to the input argument using the plus operator. 
the command", "== ssh: result['port'] = 22 else: result['port'] = 21 #", ") def listdir(self,path): return self.ftp.nlst(path) def parent(self,path): # TODO: if", "really ugly quick and dirty solution path = utf8_fix(path) if", "\"%s:%d\"%(host,port) def root(self): return \"/\" def close(self): try: self.ftp.quit() except", "tell(self): return self.file.tell() def close(self): self.file.close() def __enter__(self): return self", "g = m.groups() result = { \"mode\" : g[0], \"username\"", "\"hostname\" : g[4] or \"\", \"port\" : int(g[5][1:]) if g[5]", "result['port'] = 21 # ftp port default return result raise", "def __enter__(self): return self def __exit__(self,typ,val,tb): if typ is None:", "p ) self.ftp.mkd(path) except Exception as e: print(\"ftp mkd error:", "path.startswith(\"/\"): path = posixpath.join(root,path) return posixpath.normpath( path ) def listdir(self,path):", "except error_perm: size = None result = { \"isDir\" :", "if root and not path.startswith(\"/\"): path = posixpath.join(root,path) return posixpath.normpath(", "0 } return result def stat_fast(self,path): # not fast for", "can't cache listdir calls, but if I could, then I", "accepts a STRING, since it builds a cmd and add", "print(\"ftp mkd error: %s\"%e) def split(self,path): return posixpath.split(path) def splitext(self,path):", "{ \"name\" : self.split(path)[1], \"size\" : size or 0, \"isDir\"", "super().close() def fix(self, path): return utf8_fix(path) def join(self,*args): return posixpath.join(*args)", "does not hold true under other use cases. # I", "def seek(self,pos,whence=SEEK_SET): return self.file.seek(pos,whence) def tell(self): return self.file.tell() def close(self):", "# drive letters def __init__(self, host, port, username=\"\", password=\"\"): super(FTPSource,", "exists return True#self.exists( path ) def mkdir(self,path): # this is", "reasons unless the file exists # and is an accessable", "DataSource import sys import re reftp = re.compile('(ssh|ftp)\\:\\/\\/(([^@:]+)?:?([^@]+)?@)?([^:]+)(:[0-9]+)?\\/(.*)') def parseFTPurl(", ": g[0], \"username\" : g[2] or \"\", \"password\" : g[3]", "import re reftp = re.compile('(ssh|ftp)\\:\\/\\/(([^@:]+)?:?([^@]+)?@)?([^:]+)(:[0-9]+)?\\/(.*)') def parseFTPurl( url ): m", "is None: self.close() class FTPSource(DataSource): \"\"\" there is some sort", "\"size\" : size or 0, \"name\" : self.split(path)[1], \"mode\" :", "except Exception as e: print(\"ftp delete error: %s\"%e) else: try:", "self.file.read(n) def seek(self,pos,whence=SEEK_SET): return self.file.seek(pos,whence) def tell(self): return self.file.tell() def", "# which first loads the directory, then loops over #", "# ftp port default return result raise ValueError(\"invalid: %s\"%url) def", "fails whenm given a byte string. \"\"\" # TODO: turn", "== 0: if result['mode'] == ssh: result['port'] = 22 else:", "\"port\" : int(g[5][1:]) if g[5] else 0, \"path\" : g[6]", "BytesIO() # open the file text = \"RETR \" +", "? # empty string returns drives p,_ = posixpath.split(path) return", "\"STOR \" + utf8_fix(self.path) self.ftp.storbinary(text, self.file) def __enter__(self): return self", "could, then I could # use that to determine if" ]
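The URL parser and the utf8_fix helper can be exercised without a live server.
A minimal sketch; the URL, host, and credentials below are made-up placeholders
and are not part of the original module:

# Usage sketch for parseFTPurl / utf8_fix; runs offline.
info = parseFTPurl("ftp://user:secret@example.com/pub/file.txt")
assert info["mode"] == "ftp"
assert info["hostname"] == "example.com"
assert info["port"] == 21               # default filled in for ftp URLs
assert info["path"] == "pub/file.txt"   # the leading slash is consumed by the regex

# utf8_fix hex-escapes non-ASCII characters so ftplib command strings stay ASCII:
assert utf8_fix("café") == "cafE9"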
[ "[ 'C:\\\\Users\\\\Dude\\\\SomeFolder\\\\Chrome\\\\Default\\\\Extensions', ('C:\\\\Users\\\\Dude\\\\SomeNoneStandardFolder\\\\Chrome\\\\Default\\\\Extensions\\\\' 'hmjkmjkepdijhoojdojkdfohbdgmmhki'), ('C:\\\\Users\\\\frank\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\Extensions\\\\' 'blpcfgokakmgnkcojhhkbfbldkacnbeo'), 'C:\\\\Users\\\\frank\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\Extensions', ('C:\\\\Users\\\\frank\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\Extensions\\\\' 'icppfcnhkcmnfdhfhphakoifcfokfdhg'), 'C:\\\\Windows\\\\System32',", "self._MACOS_USERS) username = knowledge_base_object.GetUsernameForPath( self._MACOS_PATHS[0]) self.assertEqual(username, 'dude') username = knowledge_base_object.GetUsernameForPath(", "knowledge_base_object.AddEnvironmentVariable(environment_variable) with self.assertRaises(KeyError): knowledge_base_object.AddEnvironmentVariable(environment_variable) def testGetEnvironmentVariable(self): \"\"\"Tests the GetEnvironmentVariable functions.\"\"\"", "self._WINDOWS_USERS) username = knowledge_base_object.GetUsernameForPath( self._WINDOWS_PATHS[0]) self.assertEqual(username, 'dude') username = knowledge_base_object.GetUsernameForPath(", "testOperatingSystemProperty(self): \"\"\"Tests the operating_system property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() operating_system =", "hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain') knowledge_base_object.SetHostname(hostname_artifact) user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser')", "username='testuser') knowledge_base_object.AddUserAccount(user_account) self.assertTrue(knowledge_base_object.HasUserAccounts()) def testReadSystemConfigurationArtifact(self): \"\"\"Tests the ReadSystemConfigurationArtifact function.\"\"\" knowledge_base_object", "knowledge_base_object.AddEnvironmentVariable(environment_variable) def testGetEnvironmentVariable(self): \"\"\"Tests the GetEnvironmentVariable functions.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase()", "testGetUsernameByIdentifier(self): \"\"\"Tests the GetUsernameByIdentifier function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() user_account =", "value='C:\\\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable) environment_variables = knowledge_base_object.GetEnvironmentVariables() self.assertEqual(len(environment_variables), 2) def testGetHostname(self): \"\"\"Tests", "knowledge_base.KnowledgeBase() self.assertFalse(knowledge_base_object.HasUserAccounts()) user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) self.assertTrue(knowledge_base_object.HasUserAccounts())", "self.assertEqual(knowledge_base_object.year, 0) def testAddUserAccount(self): \"\"\"Tests the AddUserAccount function.\"\"\" knowledge_base_object =", "'dude') username = knowledge_base_object.GetUsernameForPath( self._WINDOWS_PATHS[2]) self.assertEqual(username, 'frank') username = knowledge_base_object.GetUsernameForPath(", "knowledge_base.KnowledgeBase() hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain') knowledge_base_object.SetHostname(hostname_artifact) user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser',", "MUI form Windows time zone name. 
knowledge_base_object.SetTimeZone('@tzres.dll,-112') self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York') with", "= knowledge_base.KnowledgeBase() knowledge_base_object.SetCodepage('cp1252') with self.assertRaises(ValueError): knowledge_base_object.SetCodepage('bogus') def testSetHostname(self): \"\"\"Tests the", "self.assertEqual(username, 'frank') username = knowledge_base_object.GetUsernameForPath( self._MACOS_PATHS[2]) self.assertIsNone(username) def testGetSetValue(self): \"\"\"Tests", "def testSetHostname(self): \"\"\"Tests the SetHostname function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() hostname_artifact", "artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) source_configurations = ( knowledge_base_object.GetSourceConfigurationArtifacts()) self.assertEqual(len(source_configurations),", "\"\"\"Tests the ReadSystemConfigurationArtifact function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() system_configuration = artifacts.SystemConfigurationArtifact()", "year property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.year, 0) def testAddUserAccount(self): \"\"\"Tests", "artifacts.UserAccountArtifact( identifier=identifier, user_directory=user.get('path', None), username=user.get('name', None)) knowledge_base_object.AddUserAccount(user_account) def testCodepageProperty(self): \"\"\"Tests", "'Windows') operating_system = knowledge_base_object.GetValue('operating_system') self.assertEqual(operating_system, 'Windows') def testTimezoneProperty(self): \"\"\"Tests the", "hostname = knowledge_base_object.GetHostname() self.assertEqual(hostname, 'myhost.mydomain') def testSetActiveSession(self): \"\"\"Tests the SetActiveSession", "property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.hostname, '') def testOperatingSystemProperty(self): \"\"\"Tests the", "knowledge_base_object.GetEnvironmentVariable( 'Bogus') self.assertIsNone(test_environment_variable) def testGetEnvironmentVariables(self): \"\"\"Tests the GetEnvironmentVariables function.\"\"\" knowledge_base_object", "'') def testOperatingSystemProperty(self): \"\"\"Tests the operating_system property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase()", "knowledge_base_object.SetHostname(hostname_artifact) user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) source_configurations =", "def testTimezoneProperty(self): \"\"\"Tests the timezone property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.timezone.zone,", "= knowledge_base.KnowledgeBase() time_zone_artifact = artifacts.TimeZoneArtifact( localized_name='Eastern (standaardtijd)', mui_form='@tzres.dll,-112', name='Eastern Standard", "knowledge_base_object.AddUserAccount(user_account) def testCodepageProperty(self): \"\"\"Tests the codepage property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase()", "{'name': 'frank', 'path': 'C:\\\\Users\\\\frank', 'sid': 'S-2'}] def _SetUserAccounts(self, knowledge_base_object, users):", "an IANA time zone name. knowledge_base_object.SetTimeZone('Europe/Zurich') self.assertEqual(knowledge_base_object._time_zone.zone, 'Europe/Zurich') # Set", "tests for GetMountPoint. 
def testGetSourceConfigurationArtifacts(self): \"\"\"Tests the GetSourceConfigurationArtifacts function.\"\"\" knowledge_base_object", "for GetMountPoint. def testGetSourceConfigurationArtifacts(self): \"\"\"Tests the GetSourceConfigurationArtifacts function.\"\"\" knowledge_base_object =", "usename = knowledge_base_object.GetUsernameByIdentifier('1000') self.assertEqual(usename, 'testuser') usename = knowledge_base_object.GetUsernameByIdentifier(1000) self.assertEqual(usename, '')", "\"\"\"Tests the GetEnvironmentVariables function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() environment_variable = artifacts.EnvironmentVariableArtifact(", "= knowledge_base_object.GetUsernameForPath( self._MACOS_PATHS[4]) self.assertEqual(username, 'hans') username = knowledge_base_object.GetUsernameForPath( self._WINDOWS_PATHS[0]) self.assertIsNone(username)", "_WINDOWS_PATHS = [ 'C:\\\\Users\\\\Dude\\\\SomeFolder\\\\Chrome\\\\Default\\\\Extensions', ('C:\\\\Users\\\\Dude\\\\SomeNoneStandardFolder\\\\Chrome\\\\Default\\\\Extensions\\\\' 'hmjkmjkepdijhoojdojkdfohbdgmmhki'), ('C:\\\\Users\\\\frank\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\Extensions\\\\' 'blpcfgokakmgnkcojhhkbfbldkacnbeo'), 'C:\\\\Users\\\\frank\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\Extensions', ('C:\\\\Users\\\\frank\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\Extensions\\\\'", "= knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.year, 0) def testAddUserAccount(self): \"\"\"Tests the AddUserAccount function.\"\"\"", "functions.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() expected_value = 'test value' knowledge_base_object.SetValue('Test', expected_value)", "'/Users/hans', 'sid': '4352'}, {'name': 'dude', 'path': '/Users/dude', 'sid': '1123'}] _WINDOWS_PATHS", "user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) system_configuration = ( knowledge_base_object._GetSystemConfigurationArtifact()) self.assertIsNotNone(system_configuration) self.assertIsNotNone(system_configuration.hostname) self.assertEqual(system_configuration.hostname.name,", "knowledge_base_object.GetValue('tEsT') self.assertEqual(value, expected_value) value = knowledge_base_object.GetValue('Bogus') self.assertIsNone(value) def testHasUserAccounts(self): \"\"\"Tests", "Data/Google/Chrome/Default', ('/Users/frank/Library/Application Data/Google/Chrome/Default/' 'Extensions/pjkljhegncpnkpknbcohdijeoejaedia'), '/Users/frank/Library/Application Data/Google/Chrome/Default/Extensions'] _MACOS_USERS = [ {'name':", "function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain') knowledge_base_object.SetHostname(hostname_artifact) user_account =", "value' knowledge_base_object.SetValue('Test', expected_value) value = knowledge_base_object.GetValue('Test') self.assertEqual(value, expected_value) value =", "tests for SetTextPrepend. 
def testSetTimeZone(self): \"\"\"Tests the SetTimeZone function.\"\"\" knowledge_base_object", "value='C:\\\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable) test_environment_variable = knowledge_base_object.GetEnvironmentVariable( 'SystemRoot') self.assertIsNotNone(test_environment_variable) test_environment_variable = knowledge_base_object.GetEnvironmentVariable(", "\"\"\"Tests the SetActiveSession function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() knowledge_base_object.SetActiveSession('ddda05bedf324cbd99fa8c24b8a0037a') self.assertEqual( knowledge_base_object._active_session,", "name. knowledge_base_object.SetTimeZone('Eastern Standard Time') self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York') # Set a localized", "= knowledge_base_object.GetUsernameForPath( self._MACOS_PATHS[0]) self.assertEqual(username, 'dude') username = knowledge_base_object.GetUsernameForPath( self._MACOS_PATHS[4]) self.assertEqual(username,", "= knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.hostname, '') def testOperatingSystemProperty(self): \"\"\"Tests the operating_system property.\"\"\"", "self.assertIsNotNone(test_environment_variable) test_environment_variable = knowledge_base_object.GetEnvironmentVariable( 'Bogus') self.assertIsNone(test_environment_variable) def testGetEnvironmentVariables(self): \"\"\"Tests the", "the ReadSystemConfigurationArtifact function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() system_configuration = artifacts.SystemConfigurationArtifact() system_configuration.hostname", "[ {'name': 'dude', 'path': 'C:\\\\Users\\\\dude', 'sid': 'S-1'}, {'name': 'frank', 'path':", "GetMountPoint. def testGetSourceConfigurationArtifacts(self): \"\"\"Tests the GetSourceConfigurationArtifacts function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase()", "{'name': 'dude', 'path': '/Users/dude', 'sid': '1123'}] _WINDOWS_PATHS = [ 'C:\\\\Users\\\\Dude\\\\SomeFolder\\\\Chrome\\\\Default\\\\Extensions',", "HasUserAccounts function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertFalse(knowledge_base_object.HasUserAccounts()) user_account = artifacts.UserAccountArtifact( identifier='1000',", "testAddUserAccount(self): \"\"\"Tests the AddUserAccount function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() user_account =", "'0'}, {'name': 'frank', 'path': '/Users/frank', 'sid': '4052'}, {'name': 'hans', 'path':", "# -*- coding: utf-8 -*- \"\"\"Tests for the knowledge base.\"\"\"", "knowledge base.\"\"\" import unittest from plaso.containers import artifacts from plaso.engine", "artifacts from plaso.engine import knowledge_base from tests import test_lib as", "with self.assertRaises(KeyError): knowledge_base_object.AddEnvironmentVariable(environment_variable) def testGetEnvironmentVariable(self): \"\"\"Tests the GetEnvironmentVariable functions.\"\"\" knowledge_base_object", "= knowledge_base.KnowledgeBase() self.assertEqual(len(knowledge_base_object.user_accounts), 0) user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser')", "case_sensitive=False, name='SystemRoot', value='C:\\\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable) environment_variable = artifacts.EnvironmentVariableArtifact( case_sensitive=False, name='WinDir', value='C:\\\\Windows')", "'') # TODO: add tests for GetMountPoint. 
def testGetSourceConfigurationArtifacts(self): \"\"\"Tests", "knowledge_base_object.GetSourceConfigurationArtifacts()) self.assertEqual(len(source_configurations), 1) self.assertIsNotNone(source_configurations[0]) system_configuration = source_configurations[0].system_configuration self.assertIsNotNone(system_configuration) self.assertIsNotNone(system_configuration.hostname) self.assertEqual(system_configuration.hostname.name,", "= knowledge_base.KnowledgeBase() hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain') knowledge_base_object.SetHostname(hostname_artifact) # TODO: add tests", "self.assertFalse(knowledge_base_object.HasUserAccounts()) user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) self.assertTrue(knowledge_base_object.HasUserAccounts()) def", "'blpcfgokakmgnkcojhhkbfbldkacnbeo'), 'C:\\\\Users\\\\frank\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\Extensions', ('C:\\\\Users\\\\frank\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\Extensions\\\\' 'icppfcnhkcmnfdhfhphakoifcfokfdhg'), 'C:\\\\Windows\\\\System32', 'C:\\\\Stuff/with path separator\\\\Folder'] _WINDOWS_USERS =", "self.assertEqual(system_configuration.hostname.name, 'myhost.mydomain') # TODO: add tests for GetTextPrepend. def testGetUsernameByIdentifier(self):", "# pylint: disable=protected-access _MACOS_PATHS = [ '/Users/dude/Library/Application Data/Google/Chrome/Default/Extensions', ('/Users/dude/Library/Application Data/Google/Chrome/Default/Extensions/'", "artifacts.TimeZoneArtifact( localized_name='Eastern (standaardtijd)', mui_form='@tzres.dll,-112', name='Eastern Standard Time') knowledge_base_object.AddAvailableTimeZone(time_zone_artifact) # Set", "Data/Google/Chrome/Default/Extensions'] _MACOS_USERS = [ {'name': 'root', 'path': '/var/root', 'sid': '0'},", "expected_value) value = knowledge_base_object.GetValue('Test') self.assertEqual(value, expected_value) value = knowledge_base_object.GetValue('tEsT') self.assertEqual(value,", "= artifacts.HostnameArtifact(name='myhost.mydomain') knowledge_base_object.SetHostname(hostname_artifact) # TODO: add tests for SetMountPoint. 
#", "\"\"\" for user in users: identifier = user.get('sid', user.get('uid', None))", "GetEnvironmentVariables function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() environment_variable = artifacts.EnvironmentVariableArtifact( case_sensitive=False, name='SystemRoot',", "usename = knowledge_base_object.GetUsernameByIdentifier('1001') self.assertEqual(usename, '') def testGetUsernameForPath(self): \"\"\"Tests the GetUsernameForPath", "test_environment_variable = knowledge_base_object.GetEnvironmentVariable( 'SystemRoot') self.assertIsNotNone(test_environment_variable) test_environment_variable = knowledge_base_object.GetEnvironmentVariable( 'sYsTeMrOoT') self.assertIsNotNone(test_environment_variable)", "knowledge_base_object.SetActiveSession( knowledge_base_object._DEFAULT_ACTIVE_SESSION) self.assertEqual( knowledge_base_object._active_session, knowledge_base_object._DEFAULT_ACTIVE_SESSION) def testSetCodepage(self): \"\"\"Tests the SetCodepage", "knowledge_base_object = knowledge_base.KnowledgeBase() operating_system = knowledge_base_object.GetValue('operating_system') self.assertIsNone(operating_system) knowledge_base_object.SetValue('operating_system', 'Windows') operating_system", "self.assertEqual(value, expected_value) value = knowledge_base_object.GetValue('tEsT') self.assertEqual(value, expected_value) value = knowledge_base_object.GetValue('Bogus')", "'sid': '1123'}] _WINDOWS_PATHS = [ 'C:\\\\Users\\\\Dude\\\\SomeFolder\\\\Chrome\\\\Default\\\\Extensions', ('C:\\\\Users\\\\Dude\\\\SomeNoneStandardFolder\\\\Chrome\\\\Default\\\\Extensions\\\\' 'hmjkmjkepdijhoojdojkdfohbdgmmhki'), ('C:\\\\Users\\\\frank\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\Extensions\\\\' 'blpcfgokakmgnkcojhhkbfbldkacnbeo'),", "Data/Google/Chrome/Default/Extensions', ('/Users/dude/Library/Application Data/Google/Chrome/Default/Extensions/' 'apdfllckaahabafndbhieahigkjlhalf'), '/private/var/log/system.log', '/Users/frank/Library/Application Data/Google/Chrome/Default', '/Users/hans/Library/Application Data/Google/Chrome/Default', ('/Users/frank/Library/Application", "'C:\\\\Users\\\\frank', 'sid': 'S-2'}] def _SetUserAccounts(self, knowledge_base_object, users): \"\"\"Sets the user", "identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) source_configurations = ( knowledge_base_object.GetSourceConfigurationArtifacts()) self.assertEqual(len(source_configurations), 1)", "def testOperatingSystemProperty(self): \"\"\"Tests the operating_system property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() operating_system", "# TODO: add tests for GetTextPrepend. def testGetUsernameByIdentifier(self): \"\"\"Tests the", "0) user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) self.assertEqual(len(knowledge_base_object.user_accounts), 1)", "(KnowledgeBase): knowledge base. users (list[dict[str,str])): users. 
\"\"\" for user in", "function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() time_zone_artifact = artifacts.TimeZoneArtifact( localized_name='Eastern (standaardtijd)', mui_form='@tzres.dll,-112',", "the HasUserAccounts function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertFalse(knowledge_base_object.HasUserAccounts()) user_account = artifacts.UserAccountArtifact(", "python3 # -*- coding: utf-8 -*- \"\"\"Tests for the knowledge", "knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.codepage, 'cp1252') def testHostnameProperty(self): \"\"\"Tests the hostname", "knowledge_base_object.GetEnvironmentVariable( 'SystemRoot') self.assertIsNotNone(test_environment_variable) test_environment_variable = knowledge_base_object.GetEnvironmentVariable( 'sYsTeMrOoT') self.assertIsNotNone(test_environment_variable) test_environment_variable =", "'sid': '0'}, {'name': 'frank', 'path': '/Users/frank', 'sid': '4052'}, {'name': 'hans',", "= knowledge_base_object.GetValue('operating_system') self.assertIsNone(operating_system) knowledge_base_object.SetValue('operating_system', 'Windows') operating_system = knowledge_base_object.GetValue('operating_system') self.assertEqual(operating_system, 'Windows')", "self._MACOS_PATHS[4]) self.assertEqual(username, 'hans') username = knowledge_base_object.GetUsernameForPath( self._WINDOWS_PATHS[0]) self.assertIsNone(username) knowledge_base_object =", "import test_lib as shared_test_lib class KnowledgeBaseTest(shared_test_lib.BaseTestCase): \"\"\"Tests for the knowledge", "hostname property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.hostname, '') def testOperatingSystemProperty(self): \"\"\"Tests", "name='myhost.mydomain') user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') system_configuration.user_accounts.append(user_account) knowledge_base_object.ReadSystemConfigurationArtifact(system_configuration) hostname", "def testGetHostname(self): \"\"\"Tests the GetHostname function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() hostname", "self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York') # Set a MUI form Windows time zone", "in users: identifier = user.get('sid', user.get('uid', None)) if not identifier:", "self.assertEqual(len(source_configurations), 1) self.assertIsNotNone(source_configurations[0]) system_configuration = source_configurations[0].system_configuration self.assertIsNotNone(system_configuration) self.assertIsNotNone(system_configuration.hostname) self.assertEqual(system_configuration.hostname.name, 'myhost.mydomain')", "artifacts.HostnameArtifact(name='myhost.mydomain') knowledge_base_object.SetHostname(hostname_artifact) user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) source_configurations", "knowledge_base_object.GetUsernameForPath( self._WINDOWS_PATHS[2]) self.assertEqual(username, 'frank') username = knowledge_base_object.GetUsernameForPath( self._MACOS_PATHS[2]) self.assertIsNone(username) def", "testGetSetValue(self): \"\"\"Tests the Get and SetValue functions.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase()", "from plaso.engine import knowledge_base from tests import test_lib as shared_test_lib", "= [ '/Users/dude/Library/Application Data/Google/Chrome/Default/Extensions', 
('/Users/dude/Library/Application Data/Google/Chrome/Default/Extensions/' 'apdfllckaahabafndbhieahigkjlhalf'), '/private/var/log/system.log', '/Users/frank/Library/Application Data/Google/Chrome/Default',", "utf-8 -*- \"\"\"Tests for the knowledge base.\"\"\" import unittest from", "= artifacts.SystemConfigurationArtifact() system_configuration.hostname = artifacts.HostnameArtifact( name='myhost.mydomain') user_account = artifacts.UserAccountArtifact( identifier='1000',", "self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York') with self.assertRaises(ValueError): knowledge_base_object.SetTimeZone('Bogus') if __name__ == '__main__': unittest.main()", "knowledge_base_object.AddUserAccount(user_account) def testAddEnvironmentVariable(self): \"\"\"Tests the AddEnvironmentVariable function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase()", "'SystemRoot') self.assertIsNotNone(test_environment_variable) test_environment_variable = knowledge_base_object.GetEnvironmentVariable( 'sYsTeMrOoT') self.assertIsNotNone(test_environment_variable) test_environment_variable = knowledge_base_object.GetEnvironmentVariable(", "# TODO: add tests for SetTextPrepend. def testSetTimeZone(self): \"\"\"Tests the", "= artifacts.UserAccountArtifact( identifier=identifier, user_directory=user.get('path', None), username=user.get('name', None)) knowledge_base_object.AddUserAccount(user_account) def testCodepageProperty(self):", "environment_variable = artifacts.EnvironmentVariableArtifact( case_sensitive=False, name='SystemRoot', value='C:\\\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable) with self.assertRaises(KeyError): knowledge_base_object.AddEnvironmentVariable(environment_variable)", "the SetHostname function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain') knowledge_base_object.SetHostname(hostname_artifact)", "knowledge_base.KnowledgeBase() user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) usename =", "= knowledge_base_object.GetEnvironmentVariable( 'SystemRoot') self.assertIsNotNone(test_environment_variable) test_environment_variable = knowledge_base_object.GetEnvironmentVariable( 'sYsTeMrOoT') self.assertIsNotNone(test_environment_variable) test_environment_variable", "name. knowledge_base_object.SetTimeZone('Europe/Zurich') self.assertEqual(knowledge_base_object._time_zone.zone, 'Europe/Zurich') # Set a Windows time zone", "self.assertIsNone(value) def testHasUserAccounts(self): \"\"\"Tests the HasUserAccounts function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase()", "zone name. 
knowledge_base_object.SetTimeZone('@tzres.dll,-112') self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York') with self.assertRaises(ValueError): knowledge_base_object.SetTimeZone('Bogus') if __name__", "mui_form='@tzres.dll,-112', name='Eastern Standard Time') knowledge_base_object.AddAvailableTimeZone(time_zone_artifact) # Set an IANA time", "'myhost.mydomain') def testSetActiveSession(self): \"\"\"Tests the SetActiveSession function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase()", "knowledge_base.KnowledgeBase() self._SetUserAccounts(knowledge_base_object, self._WINDOWS_USERS) username = knowledge_base_object.GetUsernameForPath( self._WINDOWS_PATHS[0]) self.assertEqual(username, 'dude') username", "artifacts.HostnameArtifact(name='myhost.mydomain') knowledge_base_object.SetHostname(hostname_artifact) # TODO: add tests for SetMountPoint. # TODO:", "the AddUserAccount function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() user_account = artifacts.UserAccountArtifact( identifier='1000',", "knowledge_base_object = knowledge_base.KnowledgeBase() hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain') knowledge_base_object.SetHostname(hostname_artifact) user_account = artifacts.UserAccountArtifact(", "timezone property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.timezone.zone, 'UTC') def testUserAccountsProperty(self): \"\"\"Tests", "self._MACOS_PATHS[2]) self.assertIsNone(username) def testGetSetValue(self): \"\"\"Tests the Get and SetValue functions.\"\"\"", "source_configurations = ( knowledge_base_object.GetSourceConfigurationArtifacts()) self.assertEqual(len(source_configurations), 1) self.assertIsNotNone(source_configurations[0]) system_configuration = source_configurations[0].system_configuration", "('C:\\\\Users\\\\frank\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\Extensions\\\\' 'icppfcnhkcmnfdhfhphakoifcfokfdhg'), 'C:\\\\Windows\\\\System32', 'C:\\\\Stuff/with path separator\\\\Folder'] _WINDOWS_USERS = [ {'name':", "_GetSystemConfigurationArtifact function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain') knowledge_base_object.SetHostname(hostname_artifact) user_account", "users. \"\"\" for user in users: identifier = user.get('sid', user.get('uid',", "1) def testYearProperty(self): \"\"\"Tests the year property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase()", "expected_value) value = knowledge_base_object.GetValue('Bogus') self.assertIsNone(value) def testHasUserAccounts(self): \"\"\"Tests the HasUserAccounts", "knowledge_base.KnowledgeBase() operating_system = knowledge_base_object.GetValue('operating_system') self.assertIsNone(operating_system) knowledge_base_object.SetValue('operating_system', 'Windows') operating_system = knowledge_base_object.GetValue('operating_system')", "for SetTextPrepend. 
def testSetTimeZone(self): \"\"\"Tests the SetTimeZone function.\"\"\" knowledge_base_object =", "user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) self.assertTrue(knowledge_base_object.HasUserAccounts()) def testReadSystemConfigurationArtifact(self): \"\"\"Tests the ReadSystemConfigurationArtifact function.\"\"\"", "{'name': 'dude', 'path': 'C:\\\\Users\\\\dude', 'sid': 'S-1'}, {'name': 'frank', 'path': 'C:\\\\Users\\\\frank',", "the GetUsernameForPath function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self._SetUserAccounts(knowledge_base_object, self._MACOS_USERS) username =", "Standard Time') self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York') # Set a localized Windows time", "TODO: add tests for SetMountPoint. # TODO: add tests for", "from tests import test_lib as shared_test_lib class KnowledgeBaseTest(shared_test_lib.BaseTestCase): \"\"\"Tests for", "'root', 'path': '/var/root', 'sid': '0'}, {'name': 'frank', 'path': '/Users/frank', 'sid':", "'Europe/Zurich') # Set a Windows time zone name. knowledge_base_object.SetTimeZone('Eastern Standard", "= knowledge_base_object.GetHostname() self.assertEqual(hostname, '') # TODO: add tests for GetMountPoint.", "(list[dict[str,str])): users. \"\"\" for user in users: identifier = user.get('sid',", "'C:\\\\Users\\\\dude', 'sid': 'S-1'}, {'name': 'frank', 'path': 'C:\\\\Users\\\\frank', 'sid': 'S-2'}] def", "the year property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.year, 0) def testAddUserAccount(self):", "user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') system_configuration.user_accounts.append(user_account) knowledge_base_object.ReadSystemConfigurationArtifact(system_configuration) hostname =", "the user accounts in the knowledge base. Args: knowledge_base_object (KnowledgeBase):", "= user.get('sid', user.get('uid', None)) if not identifier: continue user_account =", "knowledge base. users (list[dict[str,str])): users. \"\"\" for user in users:", "system_configuration = ( knowledge_base_object._GetSystemConfigurationArtifact()) self.assertIsNotNone(system_configuration) self.assertIsNotNone(system_configuration.hostname) self.assertEqual(system_configuration.hostname.name, 'myhost.mydomain') # TODO:", "SetTextPrepend. 
def testSetTimeZone(self): \"\"\"Tests the SetTimeZone function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase()", "user in users: identifier = user.get('sid', user.get('uid', None)) if not", "= knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.codepage, 'cp1252') def testHostnameProperty(self): \"\"\"Tests the hostname property.\"\"\"", "operating_system = knowledge_base_object.GetValue('operating_system') self.assertIsNone(operating_system) knowledge_base_object.SetValue('operating_system', 'Windows') operating_system = knowledge_base_object.GetValue('operating_system') self.assertEqual(operating_system,", "'') usename = knowledge_base_object.GetUsernameByIdentifier('1001') self.assertEqual(usename, '') def testGetUsernameForPath(self): \"\"\"Tests the", "test_lib as shared_test_lib class KnowledgeBaseTest(shared_test_lib.BaseTestCase): \"\"\"Tests for the knowledge base.\"\"\"", "ReadSystemConfigurationArtifact function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() system_configuration = artifacts.SystemConfigurationArtifact() system_configuration.hostname =", "SetMountPoint. # TODO: add tests for SetTextPrepend. def testSetTimeZone(self): \"\"\"Tests", "case_sensitive=False, name='SystemRoot', value='C:\\\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable) test_environment_variable = knowledge_base_object.GetEnvironmentVariable( 'SystemRoot') self.assertIsNotNone(test_environment_variable) test_environment_variable", "identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) usename = knowledge_base_object.GetUsernameByIdentifier('1000') self.assertEqual(usename, 'testuser') usename", "# Set a MUI form Windows time zone name. 
knowledge_base_object.SetTimeZone('@tzres.dll,-112')", "def testGetEnvironmentVariable(self): \"\"\"Tests the GetEnvironmentVariable functions.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() environment_variable", "_MACOS_PATHS = [ '/Users/dude/Library/Application Data/Google/Chrome/Default/Extensions', ('/Users/dude/Library/Application Data/Google/Chrome/Default/Extensions/' 'apdfllckaahabafndbhieahigkjlhalf'), '/private/var/log/system.log', '/Users/frank/Library/Application", "self.assertEqual(usename, 'testuser') usename = knowledge_base_object.GetUsernameByIdentifier(1000) self.assertEqual(usename, '') usename = knowledge_base_object.GetUsernameByIdentifier('1001')", "identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) self.assertTrue(knowledge_base_object.HasUserAccounts()) def testReadSystemConfigurationArtifact(self): \"\"\"Tests the ReadSystemConfigurationArtifact", "\"\"\"Tests the GetUsernameByIdentifier function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() user_account = artifacts.UserAccountArtifact(", "coding: utf-8 -*- \"\"\"Tests for the knowledge base.\"\"\" import unittest", "('C:\\\\Users\\\\frank\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\Extensions\\\\' 'blpcfgokakmgnkcojhhkbfbldkacnbeo'), 'C:\\\\Users\\\\frank\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\Extensions', ('C:\\\\Users\\\\frank\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\Extensions\\\\' 'icppfcnhkcmnfdhfhphakoifcfokfdhg'), 'C:\\\\Windows\\\\System32', 'C:\\\\Stuff/with path separator\\\\Folder'] _WINDOWS_USERS", "time_zone_artifact = artifacts.TimeZoneArtifact( localized_name='Eastern (standaardtijd)', mui_form='@tzres.dll,-112', name='Eastern Standard Time') knowledge_base_object.AddAvailableTimeZone(time_zone_artifact)", "knowledge_base_object.SetTimeZone('Eastern (standaardtijd)') self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York') # Set a MUI form Windows", "'C:\\\\Stuff/with path separator\\\\Folder'] _WINDOWS_USERS = [ {'name': 'dude', 'path': 'C:\\\\Users\\\\dude',", "test_environment_variable = knowledge_base_object.GetEnvironmentVariable( 'sYsTeMrOoT') self.assertIsNotNone(test_environment_variable) test_environment_variable = knowledge_base_object.GetEnvironmentVariable( 'Bogus') self.assertIsNone(test_environment_variable)", "knowledge_base_object.AddEnvironmentVariable(environment_variable) environment_variables = knowledge_base_object.GetEnvironmentVariables() self.assertEqual(len(environment_variables), 2) def testGetHostname(self): \"\"\"Tests the", "from plaso.containers import artifacts from plaso.engine import knowledge_base from tests", "tests for GetTextPrepend. 
def testGetUsernameByIdentifier(self): \"\"\"Tests the GetUsernameByIdentifier function.\"\"\" knowledge_base_object", "self._WINDOWS_PATHS[0]) self.assertIsNone(username) knowledge_base_object = knowledge_base.KnowledgeBase() self._SetUserAccounts(knowledge_base_object, self._WINDOWS_USERS) username = knowledge_base_object.GetUsernameForPath(", "'sid': '4352'}, {'name': 'dude', 'path': '/Users/dude', 'sid': '1123'}] _WINDOWS_PATHS =", "identifier: continue user_account = artifacts.UserAccountArtifact( identifier=identifier, user_directory=user.get('path', None), username=user.get('name', None))", "user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) with self.assertRaises(KeyError): knowledge_base_object.AddUserAccount(user_account) def testAddEnvironmentVariable(self): \"\"\"Tests the", "username='testuser') knowledge_base_object.AddUserAccount(user_account) self.assertEqual(len(knowledge_base_object.user_accounts), 1) def testYearProperty(self): \"\"\"Tests the year property.\"\"\"", "the knowledge base. Args: knowledge_base_object (KnowledgeBase): knowledge base. users (list[dict[str,str])):", "name='WinDir', value='C:\\\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable) environment_variables = knowledge_base_object.GetEnvironmentVariables() self.assertEqual(len(environment_variables), 2) def testGetHostname(self):", "function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() system_configuration = artifacts.SystemConfigurationArtifact() system_configuration.hostname = artifacts.HostnameArtifact(", "'myhost.mydomain') # TODO: add tests for GetTextPrepend. def testGetUsernameByIdentifier(self): \"\"\"Tests", "def testReadSystemConfigurationArtifact(self): \"\"\"Tests the ReadSystemConfigurationArtifact function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() system_configuration", "import artifacts from plaso.engine import knowledge_base from tests import test_lib", "function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() knowledge_base_object.SetActiveSession('ddda05bedf324cbd99fa8c24b8a0037a') self.assertEqual( knowledge_base_object._active_session, 'ddda05bedf324cbd99fa8c24b8a0037a') knowledge_base_object.SetActiveSession( knowledge_base_object._DEFAULT_ACTIVE_SESSION)", "= knowledge_base_object.GetEnvironmentVariable( 'Bogus') self.assertIsNone(test_environment_variable) def testGetEnvironmentVariables(self): \"\"\"Tests the GetEnvironmentVariables function.\"\"\"", "in the knowledge base. Args: knowledge_base_object (KnowledgeBase): knowledge base. users", "\"\"\"Tests the SetCodepage function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() knowledge_base_object.SetCodepage('cp1252') with self.assertRaises(ValueError):", "_MACOS_USERS = [ {'name': 'root', 'path': '/var/root', 'sid': '0'}, {'name':", "'path': '/var/root', 'sid': '0'}, {'name': 'frank', 'path': '/Users/frank', 'sid': '4052'},", "separator\\\\Folder'] _WINDOWS_USERS = [ {'name': 'dude', 'path': 'C:\\\\Users\\\\dude', 'sid': 'S-1'},", "= knowledge_base_object.GetUsernameByIdentifier(1000) self.assertEqual(usename, '') usename = knowledge_base_object.GetUsernameByIdentifier('1001') self.assertEqual(usename, '') def", "add tests for SetMountPoint. # TODO: add tests for SetTextPrepend.", "accounts in the knowledge base. 
Args: knowledge_base_object (KnowledgeBase): knowledge base.", "user.get('uid', None)) if not identifier: continue user_account = artifacts.UserAccountArtifact( identifier=identifier,", "2) def testGetHostname(self): \"\"\"Tests the GetHostname function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase()", "KnowledgeBaseTest(shared_test_lib.BaseTestCase): \"\"\"Tests for the knowledge base.\"\"\" # pylint: disable=protected-access _MACOS_PATHS", "user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) with self.assertRaises(KeyError): knowledge_base_object.AddUserAccount(user_account)", "testGetEnvironmentVariable(self): \"\"\"Tests the GetEnvironmentVariable functions.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() environment_variable =", "testGetUsernameForPath(self): \"\"\"Tests the GetUsernameForPath function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self._SetUserAccounts(knowledge_base_object, self._MACOS_USERS)", "'dude', 'path': 'C:\\\\Users\\\\dude', 'sid': 'S-1'}, {'name': 'frank', 'path': 'C:\\\\Users\\\\frank', 'sid':", "testGetEnvironmentVariables(self): \"\"\"Tests the GetEnvironmentVariables function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() environment_variable =", "def testAddUserAccount(self): \"\"\"Tests the AddUserAccount function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() user_account", "user.get('sid', user.get('uid', None)) if not identifier: continue user_account = artifacts.UserAccountArtifact(", "artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) with self.assertRaises(KeyError): knowledge_base_object.AddUserAccount(user_account) def testAddEnvironmentVariable(self):", "'/Users/dude/Library/Application Data/Google/Chrome/Default/Extensions', ('/Users/dude/Library/Application Data/Google/Chrome/Default/Extensions/' 'apdfllckaahabafndbhieahigkjlhalf'), '/private/var/log/system.log', '/Users/frank/Library/Application Data/Google/Chrome/Default', '/Users/hans/Library/Application Data/Google/Chrome/Default',", "= knowledge_base.KnowledgeBase() hostname = knowledge_base_object.GetHostname() self.assertEqual(hostname, '') # TODO: add", "knowledge_base_object.GetHostname() self.assertEqual(hostname, '') # TODO: add tests for GetMountPoint. def", "knowledge_base_object.SetValue('operating_system', 'Windows') operating_system = knowledge_base_object.GetValue('operating_system') self.assertEqual(operating_system, 'Windows') def testTimezoneProperty(self): \"\"\"Tests", "knowledge_base.KnowledgeBase() expected_value = 'test value' knowledge_base_object.SetValue('Test', expected_value) value = knowledge_base_object.GetValue('Test')", "base. Args: knowledge_base_object (KnowledgeBase): knowledge base. users (list[dict[str,str])): users. 
\"\"\"", "identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) with self.assertRaises(KeyError): knowledge_base_object.AddUserAccount(user_account) def testAddEnvironmentVariable(self): \"\"\"Tests", "'/Users/frank/Library/Application Data/Google/Chrome/Default', '/Users/hans/Library/Application Data/Google/Chrome/Default', ('/Users/frank/Library/Application Data/Google/Chrome/Default/' 'Extensions/pjkljhegncpnkpknbcohdijeoejaedia'), '/Users/frank/Library/Application Data/Google/Chrome/Default/Extensions'] _MACOS_USERS", "'path': '/Users/frank', 'sid': '4052'}, {'name': 'hans', 'path': '/Users/hans', 'sid': '4352'},", "= artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') system_configuration.user_accounts.append(user_account) knowledge_base_object.ReadSystemConfigurationArtifact(system_configuration) hostname = knowledge_base_object.GetHostname()", "knowledge_base_object.AddAvailableTimeZone(time_zone_artifact) # Set an IANA time zone name. knowledge_base_object.SetTimeZone('Europe/Zurich') self.assertEqual(knowledge_base_object._time_zone.zone,", "add tests for SetTextPrepend. def testSetTimeZone(self): \"\"\"Tests the SetTimeZone function.\"\"\"", "environment_variables = knowledge_base_object.GetEnvironmentVariables() self.assertEqual(len(environment_variables), 2) def testGetHostname(self): \"\"\"Tests the GetHostname", "def testHasUserAccounts(self): \"\"\"Tests the HasUserAccounts function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertFalse(knowledge_base_object.HasUserAccounts())", "self.assertEqual(operating_system, 'Windows') def testTimezoneProperty(self): \"\"\"Tests the timezone property.\"\"\" knowledge_base_object =", "identifier = user.get('sid', user.get('uid', None)) if not identifier: continue user_account", "testSetCodepage(self): \"\"\"Tests the SetCodepage function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() knowledge_base_object.SetCodepage('cp1252') with", "_WINDOWS_USERS = [ {'name': 'dude', 'path': 'C:\\\\Users\\\\dude', 'sid': 'S-1'}, {'name':", "knowledge_base_object.AddUserAccount(user_account) with self.assertRaises(KeyError): knowledge_base_object.AddUserAccount(user_account) def testAddEnvironmentVariable(self): \"\"\"Tests the AddEnvironmentVariable function.\"\"\"", "\"\"\"Tests the year property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.year, 0) def", "environment_variable = artifacts.EnvironmentVariableArtifact( case_sensitive=False, name='WinDir', value='C:\\\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable) environment_variables = knowledge_base_object.GetEnvironmentVariables()", "self._WINDOWS_PATHS[0]) self.assertEqual(username, 'dude') username = knowledge_base_object.GetUsernameForPath( self._WINDOWS_PATHS[2]) self.assertEqual(username, 'frank') username", "shared_test_lib class KnowledgeBaseTest(shared_test_lib.BaseTestCase): \"\"\"Tests for the knowledge base.\"\"\" # pylint:", "self.assertEqual(len(knowledge_base_object.user_accounts), 0) user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) self.assertEqual(len(knowledge_base_object.user_accounts),", "username = knowledge_base_object.GetUsernameForPath( self._MACOS_PATHS[4]) self.assertEqual(username, 'hans') username = 
knowledge_base_object.GetUsernameForPath( self._WINDOWS_PATHS[0])", "\"\"\"Tests the GetHostname function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() hostname = knowledge_base_object.GetHostname()", "TODO: add tests for GetTextPrepend. def testGetUsernameByIdentifier(self): \"\"\"Tests the GetUsernameByIdentifier", "\"\"\"Tests the operating_system property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() operating_system = knowledge_base_object.GetValue('operating_system')", "artifacts.HostnameArtifact(name='myhost.mydomain') knowledge_base_object.SetHostname(hostname_artifact) user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) system_configuration", "('/Users/dude/Library/Application Data/Google/Chrome/Default/Extensions/' 'apdfllckaahabafndbhieahigkjlhalf'), '/private/var/log/system.log', '/Users/frank/Library/Application Data/Google/Chrome/Default', '/Users/hans/Library/Application Data/Google/Chrome/Default', ('/Users/frank/Library/Application Data/Google/Chrome/Default/'", "= [ 'C:\\\\Users\\\\Dude\\\\SomeFolder\\\\Chrome\\\\Default\\\\Extensions', ('C:\\\\Users\\\\Dude\\\\SomeNoneStandardFolder\\\\Chrome\\\\Default\\\\Extensions\\\\' 'hmjkmjkepdijhoojdojkdfohbdgmmhki'), ('C:\\\\Users\\\\frank\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\Extensions\\\\' 'blpcfgokakmgnkcojhhkbfbldkacnbeo'), 'C:\\\\Users\\\\frank\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\Extensions', ('C:\\\\Users\\\\frank\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\Extensions\\\\' 'icppfcnhkcmnfdhfhphakoifcfokfdhg'),", "'testuser') usename = knowledge_base_object.GetUsernameByIdentifier(1000) self.assertEqual(usename, '') usename = knowledge_base_object.GetUsernameByIdentifier('1001') self.assertEqual(usename,", "SetValue functions.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() expected_value = 'test value' knowledge_base_object.SetValue('Test',", "the knowledge base.\"\"\" # pylint: disable=protected-access _MACOS_PATHS = [ '/Users/dude/Library/Application", "user_directory=user.get('path', None), username=user.get('name', None)) knowledge_base_object.AddUserAccount(user_account) def testCodepageProperty(self): \"\"\"Tests the codepage", "SetTimeZone function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() time_zone_artifact = artifacts.TimeZoneArtifact( localized_name='Eastern (standaardtijd)',", "1) self.assertIsNotNone(source_configurations[0]) system_configuration = source_configurations[0].system_configuration self.assertIsNotNone(system_configuration) self.assertIsNotNone(system_configuration.hostname) self.assertEqual(system_configuration.hostname.name, 'myhost.mydomain') def", "'hans', 'path': '/Users/hans', 'sid': '4352'}, {'name': 'dude', 'path': '/Users/dude', 'sid':", "the knowledge base.\"\"\" import unittest from plaso.containers import artifacts from", "self.assertEqual(len(environment_variables), 2) def testGetHostname(self): \"\"\"Tests the GetHostname function.\"\"\" knowledge_base_object =", "system_configuration = source_configurations[0].system_configuration self.assertIsNotNone(system_configuration) self.assertIsNotNone(system_configuration.hostname) self.assertEqual(system_configuration.hostname.name, 'myhost.mydomain') def testGetSystemConfigurationArtifact(self): \"\"\"Tests", "'Bogus') self.assertIsNone(test_environment_variable) def testGetEnvironmentVariables(self): \"\"\"Tests the GetEnvironmentVariables 
function.\"\"\" knowledge_base_object =", "time zone name. knowledge_base_object.SetTimeZone('@tzres.dll,-112') self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York') with self.assertRaises(ValueError): knowledge_base_object.SetTimeZone('Bogus') if", "= knowledge_base.KnowledgeBase() knowledge_base_object.SetActiveSession('ddda05bedf324cbd99fa8c24b8a0037a') self.assertEqual( knowledge_base_object._active_session, 'ddda05bedf324cbd99fa8c24b8a0037a') knowledge_base_object.SetActiveSession( knowledge_base_object._DEFAULT_ACTIVE_SESSION) self.assertEqual( knowledge_base_object._active_session,", "the timezone property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.timezone.zone, 'UTC') def testUserAccountsProperty(self):", "def testYearProperty(self): \"\"\"Tests the year property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.year,", "name. knowledge_base_object.SetTimeZone('@tzres.dll,-112') self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York') with self.assertRaises(ValueError): knowledge_base_object.SetTimeZone('Bogus') if __name__ ==", "'UTC') def testUserAccountsProperty(self): \"\"\"Tests the user accounts property.\"\"\" knowledge_base_object =", "user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) usename = knowledge_base_object.GetUsernameByIdentifier('1000') self.assertEqual(usename, 'testuser') usename =", "knowledge_base.KnowledgeBase() knowledge_base_object.SetCodepage('cp1252') with self.assertRaises(ValueError): knowledge_base_object.SetCodepage('bogus') def testSetHostname(self): \"\"\"Tests the SetHostname", "knowledge_base_object.SetTimeZone('@tzres.dll,-112') self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York') with self.assertRaises(ValueError): knowledge_base_object.SetTimeZone('Bogus') if __name__ == '__main__':", "GetUsernameForPath function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self._SetUserAccounts(knowledge_base_object, self._MACOS_USERS) username = knowledge_base_object.GetUsernameForPath(", "( knowledge_base_object.GetSourceConfigurationArtifacts()) self.assertEqual(len(source_configurations), 1) self.assertIsNotNone(source_configurations[0]) system_configuration = source_configurations[0].system_configuration self.assertIsNotNone(system_configuration) self.assertIsNotNone(system_configuration.hostname)", "system_configuration.hostname = artifacts.HostnameArtifact( name='myhost.mydomain') user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser')", "knowledge_base.KnowledgeBase() environment_variable = artifacts.EnvironmentVariableArtifact( case_sensitive=False, name='SystemRoot', value='C:\\\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable) with self.assertRaises(KeyError):", "'') def testGetUsernameForPath(self): \"\"\"Tests the GetUsernameForPath function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase()", "'S-1'}, {'name': 'frank', 'path': 'C:\\\\Users\\\\frank', 'sid': 'S-2'}] def _SetUserAccounts(self, knowledge_base_object,", "'C:\\\\Users\\\\frank\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\Extensions', ('C:\\\\Users\\\\frank\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\Extensions\\\\' 'icppfcnhkcmnfdhfhphakoifcfokfdhg'), 'C:\\\\Windows\\\\System32', 'C:\\\\Stuff/with path separator\\\\Folder'] _WINDOWS_USERS = [", 
"knowledge_base from tests import test_lib as shared_test_lib class KnowledgeBaseTest(shared_test_lib.BaseTestCase): \"\"\"Tests", "'sid': '4052'}, {'name': 'hans', 'path': '/Users/hans', 'sid': '4352'}, {'name': 'dude',", "user_account = artifacts.UserAccountArtifact( identifier=identifier, user_directory=user.get('path', None), username=user.get('name', None)) knowledge_base_object.AddUserAccount(user_account) def", "= artifacts.EnvironmentVariableArtifact( case_sensitive=False, name='WinDir', value='C:\\\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable) environment_variables = knowledge_base_object.GetEnvironmentVariables() self.assertEqual(len(environment_variables),", "knowledge_base.KnowledgeBase() environment_variable = artifacts.EnvironmentVariableArtifact( case_sensitive=False, name='SystemRoot', value='C:\\\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable) environment_variable =", "def testGetEnvironmentVariables(self): \"\"\"Tests the GetEnvironmentVariables function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() environment_variable", "knowledge_base_object.AddUserAccount(user_account) source_configurations = ( knowledge_base_object.GetSourceConfigurationArtifacts()) self.assertEqual(len(source_configurations), 1) self.assertIsNotNone(source_configurations[0]) system_configuration =", "case_sensitive=False, name='SystemRoot', value='C:\\\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable) with self.assertRaises(KeyError): knowledge_base_object.AddEnvironmentVariable(environment_variable) def testGetEnvironmentVariable(self): \"\"\"Tests", "knowledge_base_object = knowledge_base.KnowledgeBase() user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account)", "testReadSystemConfigurationArtifact(self): \"\"\"Tests the ReadSystemConfigurationArtifact function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() system_configuration =", "user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) source_configurations = (", "user accounts in the knowledge base. Args: knowledge_base_object (KnowledgeBase): knowledge", "knowledge_base_object = knowledge_base.KnowledgeBase() knowledge_base_object.SetCodepage('cp1252') with self.assertRaises(ValueError): knowledge_base_object.SetCodepage('bogus') def testSetHostname(self): \"\"\"Tests", "zone name. knowledge_base_object.SetTimeZone('Eastern (standaardtijd)') self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York') # Set a MUI", "system_configuration.user_accounts.append(user_account) knowledge_base_object.ReadSystemConfigurationArtifact(system_configuration) hostname = knowledge_base_object.GetHostname() self.assertEqual(hostname, 'myhost.mydomain') def testSetActiveSession(self): \"\"\"Tests", "[ {'name': 'root', 'path': '/var/root', 'sid': '0'}, {'name': 'frank', 'path':", "Set a localized Windows time zone name. 
knowledge_base_object.SetTimeZone('Eastern (standaardtijd)') self.assertEqual(knowledge_base_object._time_zone.zone,", "AddUserAccount function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser',", "artifacts.EnvironmentVariableArtifact( case_sensitive=False, name='SystemRoot', value='C:\\\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable) environment_variable = artifacts.EnvironmentVariableArtifact( case_sensitive=False, name='WinDir',", "GetTextPrepend. def testGetUsernameByIdentifier(self): \"\"\"Tests the GetUsernameByIdentifier function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase()", "knowledge_base_object.SetTimeZone('Europe/Zurich') self.assertEqual(knowledge_base_object._time_zone.zone, 'Europe/Zurich') # Set a Windows time zone name.", "username='testuser') knowledge_base_object.AddUserAccount(user_account) usename = knowledge_base_object.GetUsernameByIdentifier('1000') self.assertEqual(usename, 'testuser') usename = knowledge_base_object.GetUsernameByIdentifier(1000)", "IANA time zone name. knowledge_base_object.SetTimeZone('Europe/Zurich') self.assertEqual(knowledge_base_object._time_zone.zone, 'Europe/Zurich') # Set a", "knowledge_base_object = knowledge_base.KnowledgeBase() time_zone_artifact = artifacts.TimeZoneArtifact( localized_name='Eastern (standaardtijd)', mui_form='@tzres.dll,-112', name='Eastern", "self.assertIsNone(username) knowledge_base_object = knowledge_base.KnowledgeBase() self._SetUserAccounts(knowledge_base_object, self._WINDOWS_USERS) username = knowledge_base_object.GetUsernameForPath( self._WINDOWS_PATHS[0])", "'America/New_York') # Set a localized Windows time zone name. 
knowledge_base_object.SetTimeZone('Eastern", "name='SystemRoot', value='C:\\\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable) with self.assertRaises(KeyError): knowledge_base_object.AddEnvironmentVariable(environment_variable) def testGetEnvironmentVariable(self): \"\"\"Tests the", "username=user.get('name', None)) knowledge_base_object.AddUserAccount(user_account) def testCodepageProperty(self): \"\"\"Tests the codepage property.\"\"\" knowledge_base_object", "knowledge_base_object.GetValue('Bogus') self.assertIsNone(value) def testHasUserAccounts(self): \"\"\"Tests the HasUserAccounts function.\"\"\" knowledge_base_object =", "knowledge_base_object.SetCodepage('bogus') def testSetHostname(self): \"\"\"Tests the SetHostname function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase()", "property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.year, 0) def testAddUserAccount(self): \"\"\"Tests the", "knowledge_base.KnowledgeBase() time_zone_artifact = artifacts.TimeZoneArtifact( localized_name='Eastern (standaardtijd)', mui_form='@tzres.dll,-112', name='Eastern Standard Time')", "def testGetUsernameForPath(self): \"\"\"Tests the GetUsernameForPath function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self._SetUserAccounts(knowledge_base_object,", "expected_value) value = knowledge_base_object.GetValue('tEsT') self.assertEqual(value, expected_value) value = knowledge_base_object.GetValue('Bogus') self.assertIsNone(value)", "artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') system_configuration.user_accounts.append(user_account) knowledge_base_object.ReadSystemConfigurationArtifact(system_configuration) hostname = knowledge_base_object.GetHostname() self.assertEqual(hostname,", "knowledge_base_object.SetActiveSession('ddda05bedf324cbd99fa8c24b8a0037a') self.assertEqual( knowledge_base_object._active_session, 'ddda05bedf324cbd99fa8c24b8a0037a') knowledge_base_object.SetActiveSession( knowledge_base_object._DEFAULT_ACTIVE_SESSION) self.assertEqual( knowledge_base_object._active_session, knowledge_base_object._DEFAULT_ACTIVE_SESSION) def", "\"\"\"Tests the AddUserAccount function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() user_account = artifacts.UserAccountArtifact(", "SetCodepage function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() knowledge_base_object.SetCodepage('cp1252') with self.assertRaises(ValueError): knowledge_base_object.SetCodepage('bogus') def", "self.assertEqual(hostname, 'myhost.mydomain') def testSetActiveSession(self): \"\"\"Tests the SetActiveSession function.\"\"\" knowledge_base_object =", "the user accounts property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(len(knowledge_base_object.user_accounts), 0) user_account", "knowledge_base_object._active_session, 'ddda05bedf324cbd99fa8c24b8a0037a') knowledge_base_object.SetActiveSession( knowledge_base_object._DEFAULT_ACTIVE_SESSION) self.assertEqual( knowledge_base_object._active_session, knowledge_base_object._DEFAULT_ACTIVE_SESSION) def testSetCodepage(self): \"\"\"Tests", "codepage property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.codepage, 'cp1252') def testHostnameProperty(self): \"\"\"Tests", "self.assertRaises(KeyError): knowledge_base_object.AddEnvironmentVariable(environment_variable) def testGetEnvironmentVariable(self): 
\"\"\"Tests the GetEnvironmentVariable functions.\"\"\" knowledge_base_object =", "knowledge_base_object.GetValue('Test') self.assertEqual(value, expected_value) value = knowledge_base_object.GetValue('tEsT') self.assertEqual(value, expected_value) value =", "user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) self.assertEqual(len(knowledge_base_object.user_accounts), 1) def testYearProperty(self): \"\"\"Tests the year", "= knowledge_base.KnowledgeBase() environment_variable = artifacts.EnvironmentVariableArtifact( case_sensitive=False, name='SystemRoot', value='C:\\\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable) environment_variable", "Data/Google/Chrome/Default/Extensions/' 'apdfllckaahabafndbhieahigkjlhalf'), '/private/var/log/system.log', '/Users/frank/Library/Application Data/Google/Chrome/Default', '/Users/hans/Library/Application Data/Google/Chrome/Default', ('/Users/frank/Library/Application Data/Google/Chrome/Default/' 'Extensions/pjkljhegncpnkpknbcohdijeoejaedia'),", "zone name. knowledge_base_object.SetTimeZone('Europe/Zurich') self.assertEqual(knowledge_base_object._time_zone.zone, 'Europe/Zurich') # Set a Windows time", "'America/New_York') # Set a MUI form Windows time zone name.", "form Windows time zone name. knowledge_base_object.SetTimeZone('@tzres.dll,-112') self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York') with self.assertRaises(ValueError):", "= knowledge_base_object.GetUsernameForPath( self._WINDOWS_PATHS[0]) self.assertIsNone(username) knowledge_base_object = knowledge_base.KnowledgeBase() self._SetUserAccounts(knowledge_base_object, self._WINDOWS_USERS) username", "knowledge_base.KnowledgeBase() hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain') knowledge_base_object.SetHostname(hostname_artifact) # TODO: add tests for", "self.assertTrue(knowledge_base_object.HasUserAccounts()) def testReadSystemConfigurationArtifact(self): \"\"\"Tests the ReadSystemConfigurationArtifact function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase()", "self.assertIsNotNone(system_configuration) self.assertIsNotNone(system_configuration.hostname) self.assertEqual(system_configuration.hostname.name, 'myhost.mydomain') # TODO: add tests for GetTextPrepend.", "name='SystemRoot', value='C:\\\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable) environment_variable = artifacts.EnvironmentVariableArtifact( case_sensitive=False, name='WinDir', value='C:\\\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable)", "tests import test_lib as shared_test_lib class KnowledgeBaseTest(shared_test_lib.BaseTestCase): \"\"\"Tests for the", "class KnowledgeBaseTest(shared_test_lib.BaseTestCase): \"\"\"Tests for the knowledge base.\"\"\" # pylint: disable=protected-access", "'myhost.mydomain') def testGetSystemConfigurationArtifact(self): \"\"\"Tests the _GetSystemConfigurationArtifact function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase()", "= source_configurations[0].system_configuration self.assertIsNotNone(system_configuration) self.assertIsNotNone(system_configuration.hostname) self.assertEqual(system_configuration.hostname.name, 'myhost.mydomain') def testGetSystemConfigurationArtifact(self): \"\"\"Tests the", "property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() operating_system = knowledge_base_object.GetValue('operating_system') self.assertIsNone(operating_system) 
knowledge_base_object.SetValue('operating_system', 'Windows')", "GetHostname function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() hostname = knowledge_base_object.GetHostname() self.assertEqual(hostname, '')", "\"\"\"Tests the Get and SetValue functions.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() expected_value", "path separator\\\\Folder'] _WINDOWS_USERS = [ {'name': 'dude', 'path': 'C:\\\\Users\\\\dude', 'sid':", "the SetActiveSession function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() knowledge_base_object.SetActiveSession('ddda05bedf324cbd99fa8c24b8a0037a') self.assertEqual( knowledge_base_object._active_session, 'ddda05bedf324cbd99fa8c24b8a0037a')", "knowledge base. Args: knowledge_base_object (KnowledgeBase): knowledge base. users (list[dict[str,str])): users.", "\"\"\"Tests the HasUserAccounts function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertFalse(knowledge_base_object.HasUserAccounts()) user_account =", "accounts property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(len(knowledge_base_object.user_accounts), 0) user_account = artifacts.UserAccountArtifact(", "function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() knowledge_base_object.SetCodepage('cp1252') with self.assertRaises(ValueError): knowledge_base_object.SetCodepage('bogus') def testSetHostname(self):", "with self.assertRaises(ValueError): knowledge_base_object.SetCodepage('bogus') def testSetHostname(self): \"\"\"Tests the SetHostname function.\"\"\" knowledge_base_object", "= artifacts.EnvironmentVariableArtifact( case_sensitive=False, name='SystemRoot', value='C:\\\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable) with self.assertRaises(KeyError): knowledge_base_object.AddEnvironmentVariable(environment_variable) def", "knowledge_base_object = knowledge_base.KnowledgeBase() self._SetUserAccounts(knowledge_base_object, self._WINDOWS_USERS) username = knowledge_base_object.GetUsernameForPath( self._WINDOWS_PATHS[0]) self.assertEqual(username,", "function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain') knowledge_base_object.SetHostname(hostname_artifact) # TODO:", "username = knowledge_base_object.GetUsernameForPath( self._WINDOWS_PATHS[0]) self.assertIsNone(username) knowledge_base_object = knowledge_base.KnowledgeBase() self._SetUserAccounts(knowledge_base_object, self._WINDOWS_USERS)", "Data/Google/Chrome/Default/' 'Extensions/pjkljhegncpnkpknbcohdijeoejaedia'), '/Users/frank/Library/Application Data/Google/Chrome/Default/Extensions'] _MACOS_USERS = [ {'name': 'root', 'path':", "'/Users/frank', 'sid': '4052'}, {'name': 'hans', 'path': '/Users/hans', 'sid': '4352'}, {'name':", "self.assertEqual(system_configuration.hostname.name, 'myhost.mydomain') def testGetSystemConfigurationArtifact(self): \"\"\"Tests the _GetSystemConfigurationArtifact function.\"\"\" knowledge_base_object =", "\"\"\"Tests the _GetSystemConfigurationArtifact function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain')", "self.assertEqual(knowledge_base_object.hostname, '') def testOperatingSystemProperty(self): \"\"\"Tests the operating_system property.\"\"\" knowledge_base_object =", "'C:\\\\Windows\\\\System32', 'C:\\\\Stuff/with path separator\\\\Folder'] _WINDOWS_USERS = [ {'name': 'dude', 'path':", 
"AddEnvironmentVariable function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() environment_variable = artifacts.EnvironmentVariableArtifact( case_sensitive=False, name='SystemRoot',", "tests for SetMountPoint. # TODO: add tests for SetTextPrepend. def", "the codepage property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.codepage, 'cp1252') def testHostnameProperty(self):", "testAddEnvironmentVariable(self): \"\"\"Tests the AddEnvironmentVariable function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() environment_variable =", "base. users (list[dict[str,str])): users. \"\"\" for user in users: identifier", "Args: knowledge_base_object (KnowledgeBase): knowledge base. users (list[dict[str,str])): users. \"\"\" for", "knowledge_base_object.GetUsernameForPath( self._MACOS_PATHS[2]) self.assertIsNone(username) def testGetSetValue(self): \"\"\"Tests the Get and SetValue", "self.assertEqual(username, 'dude') username = knowledge_base_object.GetUsernameForPath( self._MACOS_PATHS[4]) self.assertEqual(username, 'hans') username =", "value='C:\\\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable) with self.assertRaises(KeyError): knowledge_base_object.AddEnvironmentVariable(environment_variable) def testGetEnvironmentVariable(self): \"\"\"Tests the GetEnvironmentVariable", "self.assertEqual(usename, '') usename = knowledge_base_object.GetUsernameByIdentifier('1001') self.assertEqual(usename, '') def testGetUsernameForPath(self): \"\"\"Tests", "value = knowledge_base_object.GetValue('Test') self.assertEqual(value, expected_value) value = knowledge_base_object.GetValue('tEsT') self.assertEqual(value, expected_value)", "knowledge_base_object.AddUserAccount(user_account) usename = knowledge_base_object.GetUsernameByIdentifier('1000') self.assertEqual(usename, 'testuser') usename = knowledge_base_object.GetUsernameByIdentifier(1000) self.assertEqual(usename,", "time zone name. knowledge_base_object.SetTimeZone('Eastern Standard Time') self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York') # Set", "knowledge_base_object.AddUserAccount(user_account) self.assertEqual(len(knowledge_base_object.user_accounts), 1) def testYearProperty(self): \"\"\"Tests the year property.\"\"\" knowledge_base_object", "'sYsTeMrOoT') self.assertIsNotNone(test_environment_variable) test_environment_variable = knowledge_base_object.GetEnvironmentVariable( 'Bogus') self.assertIsNone(test_environment_variable) def testGetEnvironmentVariables(self): \"\"\"Tests", "a Windows time zone name. 
knowledge_base_object.SetTimeZone('Eastern Standard Time') self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York')", "= knowledge_base_object.GetValue('operating_system') self.assertEqual(operating_system, 'Windows') def testTimezoneProperty(self): \"\"\"Tests the timezone property.\"\"\"", "'4352'}, {'name': 'dude', 'path': '/Users/dude', 'sid': '1123'}] _WINDOWS_PATHS = [", "knowledge_base_object = knowledge_base.KnowledgeBase() knowledge_base_object.SetActiveSession('ddda05bedf324cbd99fa8c24b8a0037a') self.assertEqual( knowledge_base_object._active_session, 'ddda05bedf324cbd99fa8c24b8a0037a') knowledge_base_object.SetActiveSession( knowledge_base_object._DEFAULT_ACTIVE_SESSION) self.assertEqual(", "'hans') username = knowledge_base_object.GetUsernameForPath( self._WINDOWS_PATHS[0]) self.assertIsNone(username) knowledge_base_object = knowledge_base.KnowledgeBase() self._SetUserAccounts(knowledge_base_object,", "\"\"\"Tests the timezone property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.timezone.zone, 'UTC') def", "knowledge_base.KnowledgeBase() system_configuration = artifacts.SystemConfigurationArtifact() system_configuration.hostname = artifacts.HostnameArtifact( name='myhost.mydomain') user_account =", "self.assertEqual(knowledge_base_object.timezone.zone, 'UTC') def testUserAccountsProperty(self): \"\"\"Tests the user accounts property.\"\"\" knowledge_base_object", "for GetTextPrepend. def testGetUsernameByIdentifier(self): \"\"\"Tests the GetUsernameByIdentifier function.\"\"\" knowledge_base_object =", "disable=protected-access _MACOS_PATHS = [ '/Users/dude/Library/Application Data/Google/Chrome/Default/Extensions', ('/Users/dude/Library/Application Data/Google/Chrome/Default/Extensions/' 'apdfllckaahabafndbhieahigkjlhalf'), '/private/var/log/system.log',", "testGetSourceConfigurationArtifacts(self): \"\"\"Tests the GetSourceConfigurationArtifacts function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() hostname_artifact =", "def testUserAccountsProperty(self): \"\"\"Tests the user accounts property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase()", "identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) system_configuration = ( knowledge_base_object._GetSystemConfigurationArtifact()) self.assertIsNotNone(system_configuration) self.assertIsNotNone(system_configuration.hostname)", "( knowledge_base_object._GetSystemConfigurationArtifact()) self.assertIsNotNone(system_configuration) self.assertIsNotNone(system_configuration.hostname) self.assertEqual(system_configuration.hostname.name, 'myhost.mydomain') # TODO: add tests", "artifacts.HostnameArtifact( name='myhost.mydomain') user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') system_configuration.user_accounts.append(user_account) knowledge_base_object.ReadSystemConfigurationArtifact(system_configuration)", "self.assertEqual(knowledge_base_object._time_zone.zone, 'Europe/Zurich') # Set a Windows time zone name. 
knowledge_base_object.SetTimeZone('Eastern", "knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(len(knowledge_base_object.user_accounts), 0) user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser',", "= knowledge_base.KnowledgeBase() environment_variable = artifacts.EnvironmentVariableArtifact( case_sensitive=False, name='SystemRoot', value='C:\\\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable) with", "the GetEnvironmentVariables function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() environment_variable = artifacts.EnvironmentVariableArtifact( case_sensitive=False,", "def testGetSystemConfigurationArtifact(self): \"\"\"Tests the _GetSystemConfigurationArtifact function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() hostname_artifact", "knowledge_base_object, users): \"\"\"Sets the user accounts in the knowledge base.", "= artifacts.HostnameArtifact( name='myhost.mydomain') user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') system_configuration.user_accounts.append(user_account)", "expected_value = 'test value' knowledge_base_object.SetValue('Test', expected_value) value = knowledge_base_object.GetValue('Test') self.assertEqual(value,", "Standard Time') knowledge_base_object.AddAvailableTimeZone(time_zone_artifact) # Set an IANA time zone name.", "= knowledge_base_object.GetUsernameByIdentifier('1001') self.assertEqual(usename, '') def testGetUsernameForPath(self): \"\"\"Tests the GetUsernameForPath function.\"\"\"", "artifacts.EnvironmentVariableArtifact( case_sensitive=False, name='SystemRoot', value='C:\\\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable) test_environment_variable = knowledge_base_object.GetEnvironmentVariable( 'SystemRoot') self.assertIsNotNone(test_environment_variable)", "\"\"\"Tests the SetTimeZone function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() time_zone_artifact = artifacts.TimeZoneArtifact(", "knowledge_base_object = knowledge_base.KnowledgeBase() hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain') knowledge_base_object.SetHostname(hostname_artifact) # TODO: add", "self._SetUserAccounts(knowledge_base_object, self._WINDOWS_USERS) username = knowledge_base_object.GetUsernameForPath( self._WINDOWS_PATHS[0]) self.assertEqual(username, 'dude') username =", "knowledge_base_object = knowledge_base.KnowledgeBase() environment_variable = artifacts.EnvironmentVariableArtifact( case_sensitive=False, name='SystemRoot', value='C:\\\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable)", "the _GetSystemConfigurationArtifact function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain') knowledge_base_object.SetHostname(hostname_artifact)", "= knowledge_base_object.GetUsernameByIdentifier('1000') self.assertEqual(usename, 'testuser') usename = knowledge_base_object.GetUsernameByIdentifier(1000) self.assertEqual(usename, '') usename", "('/Users/frank/Library/Application Data/Google/Chrome/Default/' 'Extensions/pjkljhegncpnkpknbcohdijeoejaedia'), '/Users/frank/Library/Application Data/Google/Chrome/Default/Extensions'] _MACOS_USERS = [ {'name': 'root',", "\"\"\"Tests the codepage property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.codepage, 'cp1252') def", "hostname 
= knowledge_base_object.GetHostname() self.assertEqual(hostname, '') # TODO: add tests for", "0) def testAddUserAccount(self): \"\"\"Tests the AddUserAccount function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase()", "user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) self.assertEqual(len(knowledge_base_object.user_accounts), 1) def", "-*- \"\"\"Tests for the knowledge base.\"\"\" import unittest from plaso.containers", "self.assertEqual( knowledge_base_object._active_session, 'ddda05bedf324cbd99fa8c24b8a0037a') knowledge_base_object.SetActiveSession( knowledge_base_object._DEFAULT_ACTIVE_SESSION) self.assertEqual( knowledge_base_object._active_session, knowledge_base_object._DEFAULT_ACTIVE_SESSION) def testSetCodepage(self):", "'1123'}] _WINDOWS_PATHS = [ 'C:\\\\Users\\\\Dude\\\\SomeFolder\\\\Chrome\\\\Default\\\\Extensions', ('C:\\\\Users\\\\Dude\\\\SomeNoneStandardFolder\\\\Chrome\\\\Default\\\\Extensions\\\\' 'hmjkmjkepdijhoojdojkdfohbdgmmhki'), ('C:\\\\Users\\\\frank\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\Extensions\\\\' 'blpcfgokakmgnkcojhhkbfbldkacnbeo'), 'C:\\\\Users\\\\frank\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\Extensions',", "= knowledge_base_object.GetUsernameForPath( self._WINDOWS_PATHS[2]) self.assertEqual(username, 'frank') username = knowledge_base_object.GetUsernameForPath( self._MACOS_PATHS[2]) self.assertIsNone(username)", "knowledge_base_object.SetCodepage('cp1252') with self.assertRaises(ValueError): knowledge_base_object.SetCodepage('bogus') def testSetHostname(self): \"\"\"Tests the SetHostname function.\"\"\"", "self.assertEqual(len(knowledge_base_object.user_accounts), 1) def testYearProperty(self): \"\"\"Tests the year property.\"\"\" knowledge_base_object =", "testUserAccountsProperty(self): \"\"\"Tests the user accounts property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(len(knowledge_base_object.user_accounts),", "add tests for GetTextPrepend. 
def testGetUsernameByIdentifier(self): \"\"\"Tests the GetUsernameByIdentifier function.\"\"\"", "= artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) system_configuration = ( knowledge_base_object._GetSystemConfigurationArtifact())", "= artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) with self.assertRaises(KeyError): knowledge_base_object.AddUserAccount(user_account) def", "_SetUserAccounts(self, knowledge_base_object, users): \"\"\"Sets the user accounts in the knowledge", "\"\"\"Tests the AddEnvironmentVariable function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() environment_variable = artifacts.EnvironmentVariableArtifact(", "artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) self.assertTrue(knowledge_base_object.HasUserAccounts()) def testReadSystemConfigurationArtifact(self): \"\"\"Tests the", "function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() hostname = knowledge_base_object.GetHostname() self.assertEqual(hostname, '') #", "'frank', 'path': '/Users/frank', 'sid': '4052'}, {'name': 'hans', 'path': '/Users/hans', 'sid':", "user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) system_configuration = (", "artifacts.EnvironmentVariableArtifact( case_sensitive=False, name='SystemRoot', value='C:\\\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable) with self.assertRaises(KeyError): knowledge_base_object.AddEnvironmentVariable(environment_variable) def testGetEnvironmentVariable(self):", "knowledge_base_object = knowledge_base.KnowledgeBase() self._SetUserAccounts(knowledge_base_object, self._MACOS_USERS) username = knowledge_base_object.GetUsernameForPath( self._MACOS_PATHS[0]) self.assertEqual(username,", "value='C:\\\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable) environment_variable = artifacts.EnvironmentVariableArtifact( case_sensitive=False, name='WinDir', value='C:\\\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable) environment_variables", "user accounts property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(len(knowledge_base_object.user_accounts), 0) user_account =", "(standaardtijd)') self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York') # Set a MUI form Windows time", "('C:\\\\Users\\\\Dude\\\\SomeNoneStandardFolder\\\\Chrome\\\\Default\\\\Extensions\\\\' 'hmjkmjkepdijhoojdojkdfohbdgmmhki'), ('C:\\\\Users\\\\frank\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\Extensions\\\\' 'blpcfgokakmgnkcojhhkbfbldkacnbeo'), 'C:\\\\Users\\\\frank\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\Extensions', ('C:\\\\Users\\\\frank\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\Extensions\\\\' 'icppfcnhkcmnfdhfhphakoifcfokfdhg'), 'C:\\\\Windows\\\\System32', 'C:\\\\Stuff/with path", "for the knowledge base.\"\"\" # pylint: disable=protected-access _MACOS_PATHS = [", "'path': '/Users/hans', 'sid': '4352'}, {'name': 'dude', 'path': '/Users/dude', 'sid': '1123'}]", "\"\"\"Sets the user accounts in the knowledge base. 
Args: knowledge_base_object", "plaso.engine import knowledge_base from tests import test_lib as shared_test_lib class", "testTimezoneProperty(self): \"\"\"Tests the timezone property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.timezone.zone, 'UTC')", "= knowledge_base_object.GetValue('tEsT') self.assertEqual(value, expected_value) value = knowledge_base_object.GetValue('Bogus') self.assertIsNone(value) def testHasUserAccounts(self):", "users (list[dict[str,str])): users. \"\"\" for user in users: identifier =", "Windows time zone name. knowledge_base_object.SetTimeZone('@tzres.dll,-112') self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York') with self.assertRaises(ValueError): knowledge_base_object.SetTimeZone('Bogus')", "= artifacts.HostnameArtifact(name='myhost.mydomain') knowledge_base_object.SetHostname(hostname_artifact) user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account)", "zone name. knowledge_base_object.SetTimeZone('Eastern Standard Time') self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York') # Set a", "knowledge_base_object.AddEnvironmentVariable(environment_variable) test_environment_variable = knowledge_base_object.GetEnvironmentVariable( 'SystemRoot') self.assertIsNotNone(test_environment_variable) test_environment_variable = knowledge_base_object.GetEnvironmentVariable( 'sYsTeMrOoT')", "testGetHostname(self): \"\"\"Tests the GetHostname function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() hostname =", "knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.year, 0) def testAddUserAccount(self): \"\"\"Tests the AddUserAccount function.\"\"\" knowledge_base_object", "the GetEnvironmentVariable functions.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() environment_variable = artifacts.EnvironmentVariableArtifact( case_sensitive=False,", "a MUI form Windows time zone name. knowledge_base_object.SetTimeZone('@tzres.dll,-112') self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York')", "knowledge_base_object.GetUsernameForPath( self._MACOS_PATHS[4]) self.assertEqual(username, 'hans') username = knowledge_base_object.GetUsernameForPath( self._WINDOWS_PATHS[0]) self.assertIsNone(username) knowledge_base_object", "None)) if not identifier: continue user_account = artifacts.UserAccountArtifact( identifier=identifier, user_directory=user.get('path',", "functions.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() environment_variable = artifacts.EnvironmentVariableArtifact( case_sensitive=False, name='SystemRoot', value='C:\\\\Windows')", "'C:\\\\Users\\\\Dude\\\\SomeFolder\\\\Chrome\\\\Default\\\\Extensions', ('C:\\\\Users\\\\Dude\\\\SomeNoneStandardFolder\\\\Chrome\\\\Default\\\\Extensions\\\\' 'hmjkmjkepdijhoojdojkdfohbdgmmhki'), ('C:\\\\Users\\\\frank\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\Extensions\\\\' 'blpcfgokakmgnkcojhhkbfbldkacnbeo'), 'C:\\\\Users\\\\frank\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\Extensions', ('C:\\\\Users\\\\frank\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\Extensions\\\\' 'icppfcnhkcmnfdhfhphakoifcfokfdhg'), 'C:\\\\Windows\\\\System32', 'C:\\\\Stuff/with", "not identifier: continue user_account = artifacts.UserAccountArtifact( identifier=identifier, user_directory=user.get('path', None), username=user.get('name',", "add tests for GetMountPoint. 
def testGetSourceConfigurationArtifacts(self): \"\"\"Tests the GetSourceConfigurationArtifacts function.\"\"\"", "knowledge_base_object.GetUsernameForPath( self._WINDOWS_PATHS[0]) self.assertEqual(username, 'dude') username = knowledge_base_object.GetUsernameForPath( self._WINDOWS_PATHS[2]) self.assertEqual(username, 'frank')", "self.assertIsNone(username) def testGetSetValue(self): \"\"\"Tests the Get and SetValue functions.\"\"\" knowledge_base_object", "knowledge_base.KnowledgeBase() self._SetUserAccounts(knowledge_base_object, self._MACOS_USERS) username = knowledge_base_object.GetUsernameForPath( self._MACOS_PATHS[0]) self.assertEqual(username, 'dude') username", "\"\"\"Tests the GetSourceConfigurationArtifacts function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain')", "= 'test value' knowledge_base_object.SetValue('Test', expected_value) value = knowledge_base_object.GetValue('Test') self.assertEqual(value, expected_value)", "self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York') # Set a localized Windows time zone name.", "knowledge_base.KnowledgeBase() self.assertEqual(len(knowledge_base_object.user_accounts), 0) user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account)", "= knowledge_base.KnowledgeBase() environment_variable = artifacts.EnvironmentVariableArtifact( case_sensitive=False, name='SystemRoot', value='C:\\\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable) test_environment_variable", "knowledge_base_object._active_session, knowledge_base_object._DEFAULT_ACTIVE_SESSION) def testSetCodepage(self): \"\"\"Tests the SetCodepage function.\"\"\" knowledge_base_object =", "artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) system_configuration = ( knowledge_base_object._GetSystemConfigurationArtifact()) self.assertIsNotNone(system_configuration)", "= knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.timezone.zone, 'UTC') def testUserAccountsProperty(self): \"\"\"Tests the user accounts", "'S-2'}] def _SetUserAccounts(self, knowledge_base_object, users): \"\"\"Sets the user accounts in", "= knowledge_base.KnowledgeBase() hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain') knowledge_base_object.SetHostname(hostname_artifact) user_account = artifacts.UserAccountArtifact( identifier='1000',", "base.\"\"\" # pylint: disable=protected-access _MACOS_PATHS = [ '/Users/dude/Library/Application Data/Google/Chrome/Default/Extensions', ('/Users/dude/Library/Application", "self.assertRaises(ValueError): knowledge_base_object.SetCodepage('bogus') def testSetHostname(self): \"\"\"Tests the SetHostname function.\"\"\" knowledge_base_object =", "= [ {'name': 'dude', 'path': 'C:\\\\Users\\\\dude', 'sid': 'S-1'}, {'name': 'frank',", "def testCodepageProperty(self): \"\"\"Tests the codepage property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.codepage,", "as shared_test_lib class KnowledgeBaseTest(shared_test_lib.BaseTestCase): \"\"\"Tests for the knowledge base.\"\"\" #", "operating_system = knowledge_base_object.GetValue('operating_system') self.assertEqual(operating_system, 'Windows') def testTimezoneProperty(self): \"\"\"Tests the timezone", "#!/usr/bin/env 
python3 # -*- coding: utf-8 -*- \"\"\"Tests for the", "'apdfllckaahabafndbhieahigkjlhalf'), '/private/var/log/system.log', '/Users/frank/Library/Application Data/Google/Chrome/Default', '/Users/hans/Library/Application Data/Google/Chrome/Default', ('/Users/frank/Library/Application Data/Google/Chrome/Default/' 'Extensions/pjkljhegncpnkpknbcohdijeoejaedia'), '/Users/frank/Library/Application", "SetHostname function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain') knowledge_base_object.SetHostname(hostname_artifact) #", "\"\"\"Tests for the knowledge base.\"\"\" # pylint: disable=protected-access _MACOS_PATHS =", "= [ {'name': 'root', 'path': '/var/root', 'sid': '0'}, {'name': 'frank',", "system_configuration = artifacts.SystemConfigurationArtifact() system_configuration.hostname = artifacts.HostnameArtifact( name='myhost.mydomain') user_account = artifacts.UserAccountArtifact(", "{'name': 'hans', 'path': '/Users/hans', 'sid': '4352'}, {'name': 'dude', 'path': '/Users/dude',", "function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser')", "case_sensitive=False, name='WinDir', value='C:\\\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable) environment_variables = knowledge_base_object.GetEnvironmentVariables() self.assertEqual(len(environment_variables), 2) def", "hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain') knowledge_base_object.SetHostname(hostname_artifact) # TODO: add tests for SetMountPoint.", "Windows time zone name. knowledge_base_object.SetTimeZone('Eastern (standaardtijd)') self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York') # Set", "username='testuser') knowledge_base_object.AddUserAccount(user_account) source_configurations = ( knowledge_base_object.GetSourceConfigurationArtifacts()) self.assertEqual(len(source_configurations), 1) self.assertIsNotNone(source_configurations[0]) system_configuration", "continue user_account = artifacts.UserAccountArtifact( identifier=identifier, user_directory=user.get('path', None), username=user.get('name', None)) knowledge_base_object.AddUserAccount(user_account)", "identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) self.assertEqual(len(knowledge_base_object.user_accounts), 1) def testYearProperty(self): \"\"\"Tests the", "knowledge_base_object = knowledge_base.KnowledgeBase() self.assertFalse(knowledge_base_object.HasUserAccounts()) user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser')", "localized_name='Eastern (standaardtijd)', mui_form='@tzres.dll,-112', name='Eastern Standard Time') knowledge_base_object.AddAvailableTimeZone(time_zone_artifact) # Set an", "'icppfcnhkcmnfdhfhphakoifcfokfdhg'), 'C:\\\\Windows\\\\System32', 'C:\\\\Stuff/with path separator\\\\Folder'] _WINDOWS_USERS = [ {'name': 'dude',", "knowledge_base_object.GetValue('operating_system') self.assertIsNone(operating_system) knowledge_base_object.SetValue('operating_system', 'Windows') operating_system = knowledge_base_object.GetValue('operating_system') self.assertEqual(operating_system, 'Windows') def", "identifier='1000', user_directory='/home/testuser', username='testuser') system_configuration.user_accounts.append(user_account) 
knowledge_base_object.ReadSystemConfigurationArtifact(system_configuration) hostname = knowledge_base_object.GetHostname() self.assertEqual(hostname, 'myhost.mydomain')", "knowledge_base_object.ReadSystemConfigurationArtifact(system_configuration) hostname = knowledge_base_object.GetHostname() self.assertEqual(hostname, 'myhost.mydomain') def testSetActiveSession(self): \"\"\"Tests the", "= artifacts.TimeZoneArtifact( localized_name='Eastern (standaardtijd)', mui_form='@tzres.dll,-112', name='Eastern Standard Time') knowledge_base_object.AddAvailableTimeZone(time_zone_artifact) #", "'cp1252') def testHostnameProperty(self): \"\"\"Tests the hostname property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase()", "'Extensions/pjkljhegncpnkpknbcohdijeoejaedia'), '/Users/frank/Library/Application Data/Google/Chrome/Default/Extensions'] _MACOS_USERS = [ {'name': 'root', 'path': '/var/root',", "the Get and SetValue functions.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() expected_value =", "Time') self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York') # Set a localized Windows time zone", "self.assertEqual(username, 'hans') username = knowledge_base_object.GetUsernameForPath( self._WINDOWS_PATHS[0]) self.assertIsNone(username) knowledge_base_object = knowledge_base.KnowledgeBase()", "= knowledge_base.KnowledgeBase() expected_value = 'test value' knowledge_base_object.SetValue('Test', expected_value) value =", "self.assertEqual(hostname, '') # TODO: add tests for GetMountPoint. def testGetSourceConfigurationArtifacts(self):", "testHostnameProperty(self): \"\"\"Tests the hostname property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.hostname, '')", "the GetSourceConfigurationArtifacts function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain') knowledge_base_object.SetHostname(hostname_artifact)", "= knowledge_base_object.GetUsernameForPath( self._MACOS_PATHS[2]) self.assertIsNone(username) def testGetSetValue(self): \"\"\"Tests the Get and", "knowledge_base_object.SetHostname(hostname_artifact) # TODO: add tests for SetMountPoint. 
# TODO: add", "'frank', 'path': 'C:\\\\Users\\\\frank', 'sid': 'S-2'}] def _SetUserAccounts(self, knowledge_base_object, users): \"\"\"Sets", "= knowledge_base_object.GetUsernameForPath( self._WINDOWS_PATHS[0]) self.assertEqual(username, 'dude') username = knowledge_base_object.GetUsernameForPath( self._WINDOWS_PATHS[2]) self.assertEqual(username,", "\"\"\"Tests the SetHostname function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain')", "SetActiveSession function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() knowledge_base_object.SetActiveSession('ddda05bedf324cbd99fa8c24b8a0037a') self.assertEqual( knowledge_base_object._active_session, 'ddda05bedf324cbd99fa8c24b8a0037a') knowledge_base_object.SetActiveSession(", "knowledge_base.KnowledgeBase() user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) with self.assertRaises(KeyError):", "'ddda05bedf324cbd99fa8c24b8a0037a') knowledge_base_object.SetActiveSession( knowledge_base_object._DEFAULT_ACTIVE_SESSION) self.assertEqual( knowledge_base_object._active_session, knowledge_base_object._DEFAULT_ACTIVE_SESSION) def testSetCodepage(self): \"\"\"Tests the", "base.\"\"\" import unittest from plaso.containers import artifacts from plaso.engine import", "knowledge_base_object.GetUsernameByIdentifier(1000) self.assertEqual(usename, '') usename = knowledge_base_object.GetUsernameByIdentifier('1001') self.assertEqual(usename, '') def testGetUsernameForPath(self):", "= knowledge_base.KnowledgeBase() self.assertFalse(knowledge_base_object.HasUserAccounts()) user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account)", "= ( knowledge_base_object.GetSourceConfigurationArtifacts()) self.assertEqual(len(source_configurations), 1) self.assertIsNotNone(source_configurations[0]) system_configuration = source_configurations[0].system_configuration self.assertIsNotNone(system_configuration)", "testGetSystemConfigurationArtifact(self): \"\"\"Tests the _GetSystemConfigurationArtifact function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() hostname_artifact =", "self.assertRaises(KeyError): knowledge_base_object.AddUserAccount(user_account) def testAddEnvironmentVariable(self): \"\"\"Tests the AddEnvironmentVariable function.\"\"\" knowledge_base_object =", "self.assertIsNotNone(system_configuration) self.assertIsNotNone(system_configuration.hostname) self.assertEqual(system_configuration.hostname.name, 'myhost.mydomain') def testGetSystemConfigurationArtifact(self): \"\"\"Tests the _GetSystemConfigurationArtifact function.\"\"\"", "self.assertIsNotNone(source_configurations[0]) system_configuration = source_configurations[0].system_configuration self.assertIsNotNone(system_configuration) self.assertIsNotNone(system_configuration.hostname) self.assertEqual(system_configuration.hostname.name, 'myhost.mydomain') def testGetSystemConfigurationArtifact(self):", "'/Users/dude', 'sid': '1123'}] _WINDOWS_PATHS = [ 'C:\\\\Users\\\\Dude\\\\SomeFolder\\\\Chrome\\\\Default\\\\Extensions', ('C:\\\\Users\\\\Dude\\\\SomeNoneStandardFolder\\\\Chrome\\\\Default\\\\Extensions\\\\' 'hmjkmjkepdijhoojdojkdfohbdgmmhki'), ('C:\\\\Users\\\\frank\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\Extensions\\\\'", "'path': 'C:\\\\Users\\\\dude', 'sid': 'S-1'}, 
{'name': 'frank', 'path': 'C:\\\\Users\\\\frank', 'sid': 'S-2'}]", "a localized Windows time zone name. knowledge_base_object.SetTimeZone('Eastern (standaardtijd)') self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York')", "'Windows') def testTimezoneProperty(self): \"\"\"Tests the timezone property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase()", "test_environment_variable = knowledge_base_object.GetEnvironmentVariable( 'Bogus') self.assertIsNone(test_environment_variable) def testGetEnvironmentVariables(self): \"\"\"Tests the GetEnvironmentVariables", "if not identifier: continue user_account = artifacts.UserAccountArtifact( identifier=identifier, user_directory=user.get('path', None),", "= knowledge_base.KnowledgeBase() operating_system = knowledge_base_object.GetValue('operating_system') self.assertIsNone(operating_system) knowledge_base_object.SetValue('operating_system', 'Windows') operating_system =", "GetEnvironmentVariable functions.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() environment_variable = artifacts.EnvironmentVariableArtifact( case_sensitive=False, name='SystemRoot',", "self._WINDOWS_PATHS[2]) self.assertEqual(username, 'frank') username = knowledge_base_object.GetUsernameForPath( self._MACOS_PATHS[2]) self.assertIsNone(username) def testGetSetValue(self):", "'/var/root', 'sid': '0'}, {'name': 'frank', 'path': '/Users/frank', 'sid': '4052'}, {'name':", "username = knowledge_base_object.GetUsernameForPath( self._WINDOWS_PATHS[2]) self.assertEqual(username, 'frank') username = knowledge_base_object.GetUsernameForPath( self._MACOS_PATHS[2])", "and SetValue functions.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() expected_value = 'test value'", "None), username=user.get('name', None)) knowledge_base_object.AddUserAccount(user_account) def testCodepageProperty(self): \"\"\"Tests the codepage property.\"\"\"", "knowledge_base_object.AddEnvironmentVariable(environment_variable) environment_variable = artifacts.EnvironmentVariableArtifact( case_sensitive=False, name='WinDir', value='C:\\\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable) environment_variables =", "= knowledge_base_object.GetValue('Test') self.assertEqual(value, expected_value) value = knowledge_base_object.GetValue('tEsT') self.assertEqual(value, expected_value) value", "'sid': 'S-2'}] def _SetUserAccounts(self, knowledge_base_object, users): \"\"\"Sets the user accounts", "= knowledge_base_object.GetHostname() self.assertEqual(hostname, 'myhost.mydomain') def testSetActiveSession(self): \"\"\"Tests the SetActiveSession function.\"\"\"", "# Set a Windows time zone name. knowledge_base_object.SetTimeZone('Eastern Standard Time')", "knowledge_base_object (KnowledgeBase): knowledge base. users (list[dict[str,str])): users. \"\"\" for user", "with self.assertRaises(KeyError): knowledge_base_object.AddUserAccount(user_account) def testAddEnvironmentVariable(self): \"\"\"Tests the AddEnvironmentVariable function.\"\"\" knowledge_base_object", "def testHostnameProperty(self): \"\"\"Tests the hostname property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.hostname,", "Set an IANA time zone name. knowledge_base_object.SetTimeZone('Europe/Zurich') self.assertEqual(knowledge_base_object._time_zone.zone, 'Europe/Zurich') #", "TODO: add tests for GetMountPoint. def testGetSourceConfigurationArtifacts(self): \"\"\"Tests the GetSourceConfigurationArtifacts", "time zone name. 
knowledge_base_object.SetTimeZone('Eastern (standaardtijd)') self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York') # Set a", "Time') knowledge_base_object.AddAvailableTimeZone(time_zone_artifact) # Set an IANA time zone name. knowledge_base_object.SetTimeZone('Europe/Zurich')", "self.assertIsNotNone(system_configuration.hostname) self.assertEqual(system_configuration.hostname.name, 'myhost.mydomain') # TODO: add tests for GetTextPrepend. def", "Get and SetValue functions.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() expected_value = 'test", "= artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) self.assertTrue(knowledge_base_object.HasUserAccounts()) def testReadSystemConfigurationArtifact(self): \"\"\"Tests", "plaso.containers import artifacts from plaso.engine import knowledge_base from tests import", "time zone name. knowledge_base_object.SetTimeZone('Europe/Zurich') self.assertEqual(knowledge_base_object._time_zone.zone, 'Europe/Zurich') # Set a Windows", "knowledge_base.KnowledgeBase() hostname = knowledge_base_object.GetHostname() self.assertEqual(hostname, '') # TODO: add tests", "'/private/var/log/system.log', '/Users/frank/Library/Application Data/Google/Chrome/Default', '/Users/hans/Library/Application Data/Google/Chrome/Default', ('/Users/frank/Library/Application Data/Google/Chrome/Default/' 'Extensions/pjkljhegncpnkpknbcohdijeoejaedia'), '/Users/frank/Library/Application Data/Google/Chrome/Default/Extensions']", "operating_system property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() operating_system = knowledge_base_object.GetValue('operating_system') self.assertIsNone(operating_system) knowledge_base_object.SetValue('operating_system',", "= artifacts.EnvironmentVariableArtifact( case_sensitive=False, name='SystemRoot', value='C:\\\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable) test_environment_variable = knowledge_base_object.GetEnvironmentVariable( 'SystemRoot')", "knowledge_base_object.SetHostname(hostname_artifact) user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) system_configuration =", "GetUsernameByIdentifier function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser',", "name. 
knowledge_base_object.SetTimeZone('Eastern (standaardtijd)') self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York') # Set a MUI form", "self.assertIsNotNone(system_configuration.hostname) self.assertEqual(system_configuration.hostname.name, 'myhost.mydomain') def testGetSystemConfigurationArtifact(self): \"\"\"Tests the _GetSystemConfigurationArtifact function.\"\"\" knowledge_base_object", "= knowledge_base.KnowledgeBase() user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) with", "{'name': 'root', 'path': '/var/root', 'sid': '0'}, {'name': 'frank', 'path': '/Users/frank',", "function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self._SetUserAccounts(knowledge_base_object, self._MACOS_USERS) username = knowledge_base_object.GetUsernameForPath( self._MACOS_PATHS[0])", "\"\"\"Tests the hostname property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.hostname, '') def", "knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.timezone.zone, 'UTC') def testUserAccountsProperty(self): \"\"\"Tests the user accounts property.\"\"\"", "'hmjkmjkepdijhoojdojkdfohbdgmmhki'), ('C:\\\\Users\\\\frank\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\Extensions\\\\' 'blpcfgokakmgnkcojhhkbfbldkacnbeo'), 'C:\\\\Users\\\\frank\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\Extensions', ('C:\\\\Users\\\\frank\\\\AppData\\\\Local\\\\Google\\\\Chrome\\\\Extensions\\\\' 'icppfcnhkcmnfdhfhphakoifcfokfdhg'), 'C:\\\\Windows\\\\System32', 'C:\\\\Stuff/with path separator\\\\Folder']", "None)) knowledge_base_object.AddUserAccount(user_account) def testCodepageProperty(self): \"\"\"Tests the codepage property.\"\"\" knowledge_base_object =", "testSetActiveSession(self): \"\"\"Tests the SetActiveSession function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() knowledge_base_object.SetActiveSession('ddda05bedf324cbd99fa8c24b8a0037a') self.assertEqual(", "localized Windows time zone name. knowledge_base_object.SetTimeZone('Eastern (standaardtijd)') self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York') #", "# Set a localized Windows time zone name. 
knowledge_base_object.SetTimeZone('Eastern (standaardtijd)')", "artifacts.SystemConfigurationArtifact() system_configuration.hostname = artifacts.HostnameArtifact( name='myhost.mydomain') user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser',", "knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.year, 0) def testAddUserAccount(self): \"\"\"Tests the AddUserAccount", "usename = knowledge_base_object.GetUsernameByIdentifier(1000) self.assertEqual(usename, '') usename = knowledge_base_object.GetUsernameByIdentifier('1001') self.assertEqual(usename, '')", "'path': '/Users/dude', 'sid': '1123'}] _WINDOWS_PATHS = [ 'C:\\\\Users\\\\Dude\\\\SomeFolder\\\\Chrome\\\\Default\\\\Extensions', ('C:\\\\Users\\\\Dude\\\\SomeNoneStandardFolder\\\\Chrome\\\\Default\\\\Extensions\\\\' 'hmjkmjkepdijhoojdojkdfohbdgmmhki'),", "property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.codepage, 'cp1252') def testHostnameProperty(self): \"\"\"Tests the", "def testGetUsernameByIdentifier(self): \"\"\"Tests the GetUsernameByIdentifier function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() user_account", "# TODO: add tests for GetMountPoint. def testGetSourceConfigurationArtifacts(self): \"\"\"Tests the", "knowledge_base_object.GetEnvironmentVariables() self.assertEqual(len(environment_variables), 2) def testGetHostname(self): \"\"\"Tests the GetHostname function.\"\"\" knowledge_base_object", "knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.hostname, '') def testOperatingSystemProperty(self): \"\"\"Tests the operating_system property.\"\"\" knowledge_base_object", "name='SystemRoot', value='C:\\\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable) test_environment_variable = knowledge_base_object.GetEnvironmentVariable( 'SystemRoot') self.assertIsNotNone(test_environment_variable) test_environment_variable =", "'sid': 'S-1'}, {'name': 'frank', 'path': 'C:\\\\Users\\\\frank', 'sid': 'S-2'}] def _SetUserAccounts(self,", "= knowledge_base_object.GetValue('Bogus') self.assertIsNone(value) def testHasUserAccounts(self): \"\"\"Tests the HasUserAccounts function.\"\"\" knowledge_base_object", "def testSetActiveSession(self): \"\"\"Tests the SetActiveSession function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() knowledge_base_object.SetActiveSession('ddda05bedf324cbd99fa8c24b8a0037a')", "username = knowledge_base_object.GetUsernameForPath( self._MACOS_PATHS[2]) self.assertIsNone(username) def testGetSetValue(self): \"\"\"Tests the Get", "self.assertEqual(usename, '') def testGetUsernameForPath(self): \"\"\"Tests the GetUsernameForPath function.\"\"\" knowledge_base_object =", "testHasUserAccounts(self): \"\"\"Tests the HasUserAccounts function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertFalse(knowledge_base_object.HasUserAccounts()) user_account", "def _SetUserAccounts(self, knowledge_base_object, users): \"\"\"Sets the user accounts in the", "for user in users: identifier = user.get('sid', user.get('uid', None)) if", "the GetHostname function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() hostname = knowledge_base_object.GetHostname() self.assertEqual(hostname,", "'/Users/hans/Library/Application Data/Google/Chrome/Default', ('/Users/frank/Library/Application Data/Google/Chrome/Default/' 'Extensions/pjkljhegncpnkpknbcohdijeoejaedia'), 
'/Users/frank/Library/Application Data/Google/Chrome/Default/Extensions'] _MACOS_USERS = [", "def testAddEnvironmentVariable(self): \"\"\"Tests the AddEnvironmentVariable function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() environment_variable", "knowledge_base_object = knowledge_base.KnowledgeBase() hostname = knowledge_base_object.GetHostname() self.assertEqual(hostname, '') # TODO:", "unittest from plaso.containers import artifacts from plaso.engine import knowledge_base from", "# TODO: add tests for SetMountPoint. # TODO: add tests", "\"\"\"Tests the user accounts property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(len(knowledge_base_object.user_accounts), 0)", "self.assertIsNotNone(test_environment_variable) test_environment_variable = knowledge_base_object.GetEnvironmentVariable( 'sYsTeMrOoT') self.assertIsNotNone(test_environment_variable) test_environment_variable = knowledge_base_object.GetEnvironmentVariable( 'Bogus')", "def testGetSetValue(self): \"\"\"Tests the Get and SetValue functions.\"\"\" knowledge_base_object =", "the SetTimeZone function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() time_zone_artifact = artifacts.TimeZoneArtifact( localized_name='Eastern", "TODO: add tests for SetTextPrepend. def testSetTimeZone(self): \"\"\"Tests the SetTimeZone", "= artifacts.EnvironmentVariableArtifact( case_sensitive=False, name='SystemRoot', value='C:\\\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable) environment_variable = artifacts.EnvironmentVariableArtifact( case_sensitive=False,", "def testGetSourceConfigurationArtifacts(self): \"\"\"Tests the GetSourceConfigurationArtifacts function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() hostname_artifact", "= knowledge_base_object.GetEnvironmentVariables() self.assertEqual(len(environment_variables), 2) def testGetHostname(self): \"\"\"Tests the GetHostname function.\"\"\"", "self._SetUserAccounts(knowledge_base_object, self._MACOS_USERS) username = knowledge_base_object.GetUsernameForPath( self._MACOS_PATHS[0]) self.assertEqual(username, 'dude') username =", "function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertFalse(knowledge_base_object.HasUserAccounts()) user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser',", "the GetUsernameByIdentifier function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() user_account = artifacts.UserAccountArtifact( identifier='1000',", "= knowledge_base.KnowledgeBase() self._SetUserAccounts(knowledge_base_object, self._WINDOWS_USERS) username = knowledge_base_object.GetUsernameForPath( self._WINDOWS_PATHS[0]) self.assertEqual(username, 'dude')", "knowledge_base_object.GetValue('operating_system') self.assertEqual(operating_system, 'Windows') def testTimezoneProperty(self): \"\"\"Tests the timezone property.\"\"\" knowledge_base_object", "knowledge_base_object._GetSystemConfigurationArtifact()) self.assertIsNotNone(system_configuration) self.assertIsNotNone(system_configuration.hostname) self.assertEqual(system_configuration.hostname.name, 'myhost.mydomain') # TODO: add tests for", "\"\"\"Tests for the knowledge base.\"\"\" import unittest from plaso.containers import", "self.assertIsNone(test_environment_variable) def testGetEnvironmentVariables(self): \"\"\"Tests the GetEnvironmentVariables function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase()", "'4052'}, {'name': 'hans', 'path': 
'/Users/hans', 'sid': '4352'}, {'name': 'dude', 'path':", "user_directory='/home/testuser', username='testuser') system_configuration.user_accounts.append(user_account) knowledge_base_object.ReadSystemConfigurationArtifact(system_configuration) hostname = knowledge_base_object.GetHostname() self.assertEqual(hostname, 'myhost.mydomain') def", "artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) self.assertEqual(len(knowledge_base_object.user_accounts), 1) def testYearProperty(self): \"\"\"Tests", "property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(len(knowledge_base_object.user_accounts), 0) user_account = artifacts.UserAccountArtifact( identifier='1000',", "identifier=identifier, user_directory=user.get('path', None), username=user.get('name', None)) knowledge_base_object.AddUserAccount(user_account) def testCodepageProperty(self): \"\"\"Tests the", "'/Users/frank/Library/Application Data/Google/Chrome/Default/Extensions'] _MACOS_USERS = [ {'name': 'root', 'path': '/var/root', 'sid':", "self.assertIsNone(operating_system) knowledge_base_object.SetValue('operating_system', 'Windows') operating_system = knowledge_base_object.GetValue('operating_system') self.assertEqual(operating_system, 'Windows') def testTimezoneProperty(self):", "source_configurations[0].system_configuration self.assertIsNotNone(system_configuration) self.assertIsNotNone(system_configuration.hostname) self.assertEqual(system_configuration.hostname.name, 'myhost.mydomain') def testGetSystemConfigurationArtifact(self): \"\"\"Tests the _GetSystemConfigurationArtifact", "knowledge_base_object.AddUserAccount(user_account) system_configuration = ( knowledge_base_object._GetSystemConfigurationArtifact()) self.assertIsNotNone(system_configuration) self.assertIsNotNone(system_configuration.hostname) self.assertEqual(system_configuration.hostname.name, 'myhost.mydomain') #", "\"\"\"Tests the GetEnvironmentVariable functions.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() environment_variable = artifacts.EnvironmentVariableArtifact(", "= knowledge_base.KnowledgeBase() user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) usename", "= artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) source_configurations = ( knowledge_base_object.GetSourceConfigurationArtifacts())", "knowledge_base_object.SetValue('Test', expected_value) value = knowledge_base_object.GetValue('Test') self.assertEqual(value, expected_value) value = knowledge_base_object.GetValue('tEsT')", "import knowledge_base from tests import test_lib as shared_test_lib class KnowledgeBaseTest(shared_test_lib.BaseTestCase):", "testSetTimeZone(self): \"\"\"Tests the SetTimeZone function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() time_zone_artifact =", "= knowledge_base_object.GetEnvironmentVariable( 'sYsTeMrOoT') self.assertIsNotNone(test_environment_variable) test_environment_variable = knowledge_base_object.GetEnvironmentVariable( 'Bogus') self.assertIsNone(test_environment_variable) def", "for the knowledge base.\"\"\" import unittest from plaso.containers import artifacts", "def testSetCodepage(self): \"\"\"Tests the SetCodepage function.\"\"\" knowledge_base_object = 
knowledge_base.KnowledgeBase() knowledge_base_object.SetCodepage('cp1252')", "knowledge_base_object.GetEnvironmentVariable( 'sYsTeMrOoT') self.assertIsNotNone(test_environment_variable) test_environment_variable = knowledge_base_object.GetEnvironmentVariable( 'Bogus') self.assertIsNone(test_environment_variable) def testGetEnvironmentVariables(self):", "'path': 'C:\\\\Users\\\\frank', 'sid': 'S-2'}] def _SetUserAccounts(self, knowledge_base_object, users): \"\"\"Sets the", "knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.codepage, 'cp1252') def testHostnameProperty(self): \"\"\"Tests the hostname property.\"\"\" knowledge_base_object", "username='testuser') knowledge_base_object.AddUserAccount(user_account) with self.assertRaises(KeyError): knowledge_base_object.AddUserAccount(user_account) def testAddEnvironmentVariable(self): \"\"\"Tests the AddEnvironmentVariable", "testCodepageProperty(self): \"\"\"Tests the codepage property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.codepage, 'cp1252')", "'dude') username = knowledge_base_object.GetUsernameForPath( self._MACOS_PATHS[4]) self.assertEqual(username, 'hans') username = knowledge_base_object.GetUsernameForPath(", "self.assertEqual(username, 'dude') username = knowledge_base_object.GetUsernameForPath( self._WINDOWS_PATHS[2]) self.assertEqual(username, 'frank') username =", "knowledge_base.KnowledgeBase() knowledge_base_object.SetActiveSession('ddda05bedf324cbd99fa8c24b8a0037a') self.assertEqual( knowledge_base_object._active_session, 'ddda05bedf324cbd99fa8c24b8a0037a') knowledge_base_object.SetActiveSession( knowledge_base_object._DEFAULT_ACTIVE_SESSION) self.assertEqual( knowledge_base_object._active_session, knowledge_base_object._DEFAULT_ACTIVE_SESSION)", "knowledge_base_object = knowledge_base.KnowledgeBase() expected_value = 'test value' knowledge_base_object.SetValue('Test', expected_value) value", "knowledge_base_object.GetHostname() self.assertEqual(hostname, 'myhost.mydomain') def testSetActiveSession(self): \"\"\"Tests the SetActiveSession function.\"\"\" knowledge_base_object", "the SetCodepage function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() knowledge_base_object.SetCodepage('cp1252') with self.assertRaises(ValueError): knowledge_base_object.SetCodepage('bogus')", "def testSetTimeZone(self): \"\"\"Tests the SetTimeZone function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() time_zone_artifact", "knowledge_base_object.SetTimeZone('Eastern Standard Time') self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York') # Set a localized Windows", "= knowledge_base.KnowledgeBase() self._SetUserAccounts(knowledge_base_object, self._MACOS_USERS) username = knowledge_base_object.GetUsernameForPath( self._MACOS_PATHS[0]) self.assertEqual(username, 'dude')", "pylint: disable=protected-access _MACOS_PATHS = [ '/Users/dude/Library/Application Data/Google/Chrome/Default/Extensions', ('/Users/dude/Library/Application Data/Google/Chrome/Default/Extensions/' 'apdfllckaahabafndbhieahigkjlhalf'),", "property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.timezone.zone, 'UTC') def testUserAccountsProperty(self): \"\"\"Tests the", "knowledge_base_object.GetUsernameByIdentifier('1001') self.assertEqual(usename, '') def testGetUsernameForPath(self): \"\"\"Tests the GetUsernameForPath function.\"\"\" knowledge_base_object", 
"self.assertEqual(knowledge_base_object.codepage, 'cp1252') def testHostnameProperty(self): \"\"\"Tests the hostname property.\"\"\" knowledge_base_object =", "the hostname property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.hostname, '') def testOperatingSystemProperty(self):", "user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) self.assertTrue(knowledge_base_object.HasUserAccounts()) def testReadSystemConfigurationArtifact(self):", "= artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) usename = knowledge_base_object.GetUsernameByIdentifier('1000') self.assertEqual(usename,", "value = knowledge_base_object.GetValue('tEsT') self.assertEqual(value, expected_value) value = knowledge_base_object.GetValue('Bogus') self.assertIsNone(value) def", "knowledge_base_object = knowledge_base.KnowledgeBase() system_configuration = artifacts.SystemConfigurationArtifact() system_configuration.hostname = artifacts.HostnameArtifact( name='myhost.mydomain')", "testYearProperty(self): \"\"\"Tests the year property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.year, 0)", "knowledge_base.KnowledgeBase() environment_variable = artifacts.EnvironmentVariableArtifact( case_sensitive=False, name='SystemRoot', value='C:\\\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable) test_environment_variable =", "the operating_system property.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() operating_system = knowledge_base_object.GetValue('operating_system') self.assertIsNone(operating_system)", "self._MACOS_PATHS[0]) self.assertEqual(username, 'dude') username = knowledge_base_object.GetUsernameForPath( self._MACOS_PATHS[4]) self.assertEqual(username, 'hans') username", "knowledge_base_object._DEFAULT_ACTIVE_SESSION) def testSetCodepage(self): \"\"\"Tests the SetCodepage function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase()", "# Set an IANA time zone name. 
knowledge_base_object.SetTimeZone('Europe/Zurich') self.assertEqual(knowledge_base_object._time_zone.zone, 'Europe/Zurich')", "-*- coding: utf-8 -*- \"\"\"Tests for the knowledge base.\"\"\" import", "'frank') username = knowledge_base_object.GetUsernameForPath( self._MACOS_PATHS[2]) self.assertIsNone(username) def testGetSetValue(self): \"\"\"Tests the", "artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) usename = knowledge_base_object.GetUsernameByIdentifier('1000') self.assertEqual(usename, 'testuser')", "environment_variable = artifacts.EnvironmentVariableArtifact( case_sensitive=False, name='SystemRoot', value='C:\\\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable) environment_variable = artifacts.EnvironmentVariableArtifact(", "knowledge_base_object.GetUsernameForPath( self._MACOS_PATHS[0]) self.assertEqual(username, 'dude') username = knowledge_base_object.GetUsernameForPath( self._MACOS_PATHS[4]) self.assertEqual(username, 'hans')", "self.assertEqual(value, expected_value) value = knowledge_base_object.GetValue('Bogus') self.assertIsNone(value) def testHasUserAccounts(self): \"\"\"Tests the", "the AddEnvironmentVariable function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() environment_variable = artifacts.EnvironmentVariableArtifact( case_sensitive=False,", "import unittest from plaso.containers import artifacts from plaso.engine import knowledge_base", "username = knowledge_base_object.GetUsernameForPath( self._MACOS_PATHS[0]) self.assertEqual(username, 'dude') username = knowledge_base_object.GetUsernameForPath( self._MACOS_PATHS[4])", "knowledge base.\"\"\" # pylint: disable=protected-access _MACOS_PATHS = [ '/Users/dude/Library/Application Data/Google/Chrome/Default/Extensions',", "GetSourceConfigurationArtifacts function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain') knowledge_base_object.SetHostname(hostname_artifact) user_account", "{'name': 'frank', 'path': '/Users/frank', 'sid': '4052'}, {'name': 'hans', 'path': '/Users/hans',", "environment_variable = artifacts.EnvironmentVariableArtifact( case_sensitive=False, name='SystemRoot', value='C:\\\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable) test_environment_variable = knowledge_base_object.GetEnvironmentVariable(", "username='testuser') knowledge_base_object.AddUserAccount(user_account) system_configuration = ( knowledge_base_object._GetSystemConfigurationArtifact()) self.assertIsNotNone(system_configuration) self.assertIsNotNone(system_configuration.hostname) self.assertEqual(system_configuration.hostname.name, 'myhost.mydomain')", "= artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) self.assertEqual(len(knowledge_base_object.user_accounts), 1) def testYearProperty(self):", "\"\"\"Tests the GetUsernameForPath function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() self._SetUserAccounts(knowledge_base_object, self._MACOS_USERS) username", "knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.hostname, '') def testOperatingSystemProperty(self): \"\"\"Tests the operating_system", "knowledge_base_object.GetUsernameForPath( self._WINDOWS_PATHS[0]) self.assertIsNone(username) knowledge_base_object = 
knowledge_base.KnowledgeBase() self._SetUserAccounts(knowledge_base_object, self._WINDOWS_USERS) username =", "self.assertEqual( knowledge_base_object._active_session, knowledge_base_object._DEFAULT_ACTIVE_SESSION) def testSetCodepage(self): \"\"\"Tests the SetCodepage function.\"\"\" knowledge_base_object", "Windows time zone name. knowledge_base_object.SetTimeZone('Eastern Standard Time') self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York') #", "knowledge_base_object.AddUserAccount(user_account) self.assertTrue(knowledge_base_object.HasUserAccounts()) def testReadSystemConfigurationArtifact(self): \"\"\"Tests the ReadSystemConfigurationArtifact function.\"\"\" knowledge_base_object =", "= ( knowledge_base_object._GetSystemConfigurationArtifact()) self.assertIsNotNone(system_configuration) self.assertIsNotNone(system_configuration.hostname) self.assertEqual(system_configuration.hostname.name, 'myhost.mydomain') # TODO: add", "users: identifier = user.get('sid', user.get('uid', None)) if not identifier: continue", "testSetHostname(self): \"\"\"Tests the SetHostname function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() hostname_artifact =", "knowledge_base_object._DEFAULT_ACTIVE_SESSION) self.assertEqual( knowledge_base_object._active_session, knowledge_base_object._DEFAULT_ACTIVE_SESSION) def testSetCodepage(self): \"\"\"Tests the SetCodepage function.\"\"\"", "username='testuser') system_configuration.user_accounts.append(user_account) knowledge_base_object.ReadSystemConfigurationArtifact(system_configuration) hostname = knowledge_base_object.GetHostname() self.assertEqual(hostname, 'myhost.mydomain') def testSetActiveSession(self):", "username = knowledge_base_object.GetUsernameForPath( self._WINDOWS_PATHS[0]) self.assertEqual(username, 'dude') username = knowledge_base_object.GetUsernameForPath( self._WINDOWS_PATHS[2])", "for SetMountPoint. # TODO: add tests for SetTextPrepend. 
def testSetTimeZone(self):", "= knowledge_base.KnowledgeBase() system_configuration = artifacts.SystemConfigurationArtifact() system_configuration.hostname = artifacts.HostnameArtifact( name='myhost.mydomain') user_account", "user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) source_configurations = ( knowledge_base_object.GetSourceConfigurationArtifacts()) self.assertEqual(len(source_configurations), 1) self.assertIsNotNone(source_configurations[0])", "knowledge_base_object.GetUsernameByIdentifier('1000') self.assertEqual(usename, 'testuser') usename = knowledge_base_object.GetUsernameByIdentifier(1000) self.assertEqual(usename, '') usename =", "name='Eastern Standard Time') knowledge_base_object.AddAvailableTimeZone(time_zone_artifact) # Set an IANA time zone", "Data/Google/Chrome/Default', '/Users/hans/Library/Application Data/Google/Chrome/Default', ('/Users/frank/Library/Application Data/Google/Chrome/Default/' 'Extensions/pjkljhegncpnkpknbcohdijeoejaedia'), '/Users/frank/Library/Application Data/Google/Chrome/Default/Extensions'] _MACOS_USERS =", "(standaardtijd)', mui_form='@tzres.dll,-112', name='Eastern Standard Time') knowledge_base_object.AddAvailableTimeZone(time_zone_artifact) # Set an IANA", "artifacts.EnvironmentVariableArtifact( case_sensitive=False, name='WinDir', value='C:\\\\Windows') knowledge_base_object.AddEnvironmentVariable(environment_variable) environment_variables = knowledge_base_object.GetEnvironmentVariables() self.assertEqual(len(environment_variables), 2)", "user_account = artifacts.UserAccountArtifact( identifier='1000', user_directory='/home/testuser', username='testuser') knowledge_base_object.AddUserAccount(user_account) usename = knowledge_base_object.GetUsernameByIdentifier('1000')", "Set a MUI form Windows time zone name. knowledge_base_object.SetTimeZone('@tzres.dll,-112') self.assertEqual(knowledge_base_object._time_zone.zone,", "function.\"\"\" knowledge_base_object = knowledge_base.KnowledgeBase() environment_variable = artifacts.EnvironmentVariableArtifact( case_sensitive=False, name='SystemRoot', value='C:\\\\Windows')", "users): \"\"\"Sets the user accounts in the knowledge base. Args:", "'dude', 'path': '/Users/dude', 'sid': '1123'}] _WINDOWS_PATHS = [ 'C:\\\\Users\\\\Dude\\\\SomeFolder\\\\Chrome\\\\Default\\\\Extensions', ('C:\\\\Users\\\\Dude\\\\SomeNoneStandardFolder\\\\Chrome\\\\Default\\\\Extensions\\\\'", "Set a Windows time zone name. knowledge_base_object.SetTimeZone('Eastern Standard Time') self.assertEqual(knowledge_base_object._time_zone.zone,", "'test value' knowledge_base_object.SetValue('Test', expected_value) value = knowledge_base_object.GetValue('Test') self.assertEqual(value, expected_value) value", "value = knowledge_base_object.GetValue('Bogus') self.assertIsNone(value) def testHasUserAccounts(self): \"\"\"Tests the HasUserAccounts function.\"\"\"", "[ '/Users/dude/Library/Application Data/Google/Chrome/Default/Extensions', ('/Users/dude/Library/Application Data/Google/Chrome/Default/Extensions/' 'apdfllckaahabafndbhieahigkjlhalf'), '/private/var/log/system.log', '/Users/frank/Library/Application Data/Google/Chrome/Default', '/Users/hans/Library/Application", "knowledge_base_object = knowledge_base.KnowledgeBase() self.assertEqual(knowledge_base_object.timezone.zone, 'UTC') def testUserAccountsProperty(self): \"\"\"Tests the user" ]
[ "\"aa\", \"aaa\", \"aaaa\", \"aaaaa\", \"aaaaaa\", # \"aaaaaaa\", \"aaaaaaaa\", \"aaaaaaaaa\", \"aaaaaaaaaa\"]", "= \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\" # words = [\"a\", \"aa\", \"aaa\", \"aaaa\", \"aaaaa\",", "中存在所有 word 都不包含的字母,则直接退出 set1 = set(s) set2 = set(''.join(wordDict)) if", "wordBreak_dfs(self, s: str, wordDict: List[str]) -> List[str]: def dfs(s: str,", "\"cats\", \"and\", \"sand\", \"dog\"])) print(s.wordBreak_dfs('pineapplepenapple', [ \"apple\", \"pen\", \"applepen\", \"pine\",", "[None] * (len(s) + 1) dp[0] = [[]] for i", "[' '.join(res) for res in dp[-1]] def wordBreak_dfs(self, s: str,", "== 0: return [[]] res = [] for w in", "res = [] for w in wordDict: if s.startswith(w): tmp", "Programming/140. Word Break II.py # https://leetcode.com/problems/word-break-ii/ from typing import List", "is None: continue tmp = s[i:] for w in wordDict:", "的意思是,子字符串 s[:i] 能以怎样的方式进行分割 # 如果是 [[]] 则表示开头 # 如果是 [None],则表示还没有访问到,或没有办法进行分割", "res.append([w] + combo) memo[s] = res return res return dfs(s)", "combo in combos: res.append([w] + combo) memo[s] = res return", "dp[-1] is None: return [] return [' '.join(res) for res", "return [] # dp[i] 的意思是,子字符串 s[:i] 能以怎样的方式进行分割 # 如果是 [[]]", "res return res return dfs(s) s = Solution() print(s.wordBreak_dfs('catsanddog', [\"cat\",", "https://leetcode.com/problems/word-break-ii/ from typing import List class Solution: def wordBreak(self, s:", "+ [w]) if dp[-1] is None: return [] return ['", "memo[s] = res return res return dfs(s) s = Solution()", "if dp[i] is None: continue tmp = s[i:] for w", "s in memo: return memo[s] if len(s) == 0: return", "[] # 将目前的所有方式全部添加到新的位置,并在每个的最后追加当前的单词 for dic in dp[i]: dp[idx].append(dic + [w])", "dp[i]: dp[idx].append(dic + [w]) if dp[-1] is None: return []", "tmp = s[len(w):] combos = dfs(tmp, memo) for combo in", "not set1.issubset(set2): return [] # dp[i] 的意思是,子字符串 s[:i] 能以怎样的方式进行分割 #", "combo) memo[s] = res return res return dfs(s) s =", "[['a', 'b'], ['ab']] 则表示目前已经有两种方式拼出这个子字符串 dp = [None] * (len(s) +", "做一个快速的检查,如果 s 中存在所有 word 都不包含的字母,则直接退出 set1 = set(s) set2 =", "for dic in dp[i]: dp[idx].append(dic + [w]) if dp[-1] is", "return dfs(s) s = Solution() print(s.wordBreak_dfs('catsanddog', [\"cat\", \"cats\", \"and\", \"sand\",", "s: str, wordDict: List[str]) -> List[str]: # 做一个快速的检查,如果 s 中存在所有", "1): # 如果当前子字符串无法分割,则跳过 if dp[i] is None: continue tmp =", "in range(len(s) + 1): # 如果当前子字符串无法分割,则跳过 if dp[i] is None:", "\"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\" # words = [\"a\", \"aa\", \"aaa\", \"aaaa\", \"aaaaa\", \"aaaaaa\",", "s[:i] 能以怎样的方式进行分割 # 如果是 [[]] 则表示开头 # 如果是 [None],则表示还没有访问到,或没有办法进行分割 #", "<filename>Problems/Dynamic Programming/140. 
Word Break II.py # https://leetcode.com/problems/word-break-ii/ from typing import", "dp[idx] is None: dp[idx] = [] # 将目前的所有方式全部添加到新的位置,并在每个的最后追加当前的单词 for dic", "set1.issubset(set2): return [] # dp[i] 的意思是,子字符串 s[:i] 能以怎样的方式进行分割 # 如果是", "dp = [None] * (len(s) + 1) dp[0] = [[]]", "wordDict: List[str]) -> List[str]: def dfs(s: str, memo={}): if s", "List[str]) -> List[str]: def dfs(s: str, memo={}): if s in", "len(w) + i if idx > len(s): continue if tmp.startswith(w):", "combos = dfs(tmp, memo) for combo in combos: res.append([w] +", "= dfs(tmp, memo) for combo in combos: res.append([w] + combo)", "memo) for combo in combos: res.append([w] + combo) memo[s] =", "= [\"a\", \"aa\", \"aaa\", \"aaaa\", \"aaaaa\", \"aaaaaa\", # \"aaaaaaa\", \"aaaaaaaa\",", "set1 = set(s) set2 = set(''.join(wordDict)) if not set1.issubset(set2): return", "def wordBreak_dfs(self, s: str, wordDict: List[str]) -> List[str]: def dfs(s:", "in wordDict: if s.startswith(w): tmp = s[len(w):] combos = dfs(tmp,", "combos: res.append([w] + combo) memo[s] = res return res return", "[ \"apple\", \"pen\", \"applepen\", \"pine\", \"pineapple\"])) # text = \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\"", "Solution: def wordBreak(self, s: str, wordDict: List[str]) -> List[str]: #", "if tmp.startswith(w): if dp[idx] is None: dp[idx] = [] #", "if dp[idx] is None: dp[idx] = [] # 将目前的所有方式全部添加到新的位置,并在每个的最后追加当前的单词 for", "\"pineapple\"])) # text = \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\" # words = [\"a\", \"aa\",", "tmp = s[i:] for w in wordDict: idx = len(w)", "s[i:] for w in wordDict: idx = len(w) + i", "= s[i:] for w in wordDict: idx = len(w) +", "dp[-1]] def wordBreak_dfs(self, s: str, wordDict: List[str]) -> List[str]: def", "= set(s) set2 = set(''.join(wordDict)) if not set1.issubset(set2): return []", "str, wordDict: List[str]) -> List[str]: def dfs(s: str, memo={}): if", "[\"a\", \"aa\", \"aaa\", \"aaaa\", \"aaaaa\", \"aaaaaa\", # \"aaaaaaa\", \"aaaaaaaa\", \"aaaaaaaaa\",", "\"aaaa\", \"aaaaa\", \"aaaaaa\", # \"aaaaaaa\", \"aaaaaaaa\", \"aaaaaaaaa\", \"aaaaaaaaaa\"] # print(s.wordBreak(text,", "如果是 [[]] 则表示开头 # 如果是 [None],则表示还没有访问到,或没有办法进行分割 # 如果是 [['a', 'b'],", "print(s.wordBreak_dfs('catsanddog', [\"cat\", \"cats\", \"and\", \"sand\", \"dog\"])) print(s.wordBreak_dfs('pineapplepenapple', [ \"apple\", \"pen\",", "print(s.wordBreak_dfs('pineapplepenapple', [ \"apple\", \"pen\", \"applepen\", \"pine\", \"pineapple\"])) # text =", "dp[i] 的意思是,子字符串 s[:i] 能以怎样的方式进行分割 # 如果是 [[]] 则表示开头 # 如果是", "'b'], ['ab']] 则表示目前已经有两种方式拼出这个子字符串 dp = [None] * (len(s) + 1)", "\"and\", \"sand\", \"dog\"])) print(s.wordBreak_dfs('pineapplepenapple', [ \"apple\", \"pen\", \"applepen\", \"pine\", \"pineapple\"]))", "List[str]: # 做一个快速的检查,如果 s 中存在所有 word 都不包含的字母,则直接退出 set1 = set(s)", "# 如果是 [[]] 则表示开头 # 如果是 [None],则表示还没有访问到,或没有办法进行分割 # 如果是 [['a',", "[[]] 则表示开头 # 如果是 [None],则表示还没有访问到,或没有办法进行分割 # 如果是 [['a', 'b'], ['ab']]", "# 如果当前子字符串无法分割,则跳过 if dp[i] is None: continue tmp = s[i:]", "for w in wordDict: idx = len(w) + i if", "wordDict: if s.startswith(w): tmp = s[len(w):] combos = dfs(tmp, memo)", "return [[]] res = [] for w in wordDict: if", "dfs(s) s = Solution() print(s.wordBreak_dfs('catsanddog', [\"cat\", \"cats\", \"and\", \"sand\", \"dog\"]))", "# https://leetcode.com/problems/word-break-ii/ from typing import List class Solution: def 
wordBreak(self,", "1) dp[0] = [[]] for i in range(len(s) + 1):", "idx > len(s): continue if tmp.startswith(w): if dp[idx] is None:", "dfs(s: str, memo={}): if s in memo: return memo[s] if", "if s in memo: return memo[s] if len(s) == 0:", "memo[s] if len(s) == 0: return [[]] res = []", "import List class Solution: def wordBreak(self, s: str, wordDict: List[str])", "for w in wordDict: if s.startswith(w): tmp = s[len(w):] combos", "return memo[s] if len(s) == 0: return [[]] res =", "set2 = set(''.join(wordDict)) if not set1.issubset(set2): return [] # dp[i]", "None: dp[idx] = [] # 将目前的所有方式全部添加到新的位置,并在每个的最后追加当前的单词 for dic in dp[i]:", "set(''.join(wordDict)) if not set1.issubset(set2): return [] # dp[i] 的意思是,子字符串 s[:i]", "[\"cat\", \"cats\", \"and\", \"sand\", \"dog\"])) print(s.wordBreak_dfs('pineapplepenapple', [ \"apple\", \"pen\", \"applepen\",", "s.startswith(w): tmp = s[len(w):] combos = dfs(tmp, memo) for combo", "for combo in combos: res.append([w] + combo) memo[s] = res", "则表示开头 # 如果是 [None],则表示还没有访问到,或没有办法进行分割 # 如果是 [['a', 'b'], ['ab']] 则表示目前已经有两种方式拼出这个子字符串", "if s.startswith(w): tmp = s[len(w):] combos = dfs(tmp, memo) for", "\"pen\", \"applepen\", \"pine\", \"pineapple\"])) # text = \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\" # words", "都不包含的字母,则直接退出 set1 = set(s) set2 = set(''.join(wordDict)) if not set1.issubset(set2):", "# dp[i] 的意思是,子字符串 s[:i] 能以怎样的方式进行分割 # 如果是 [[]] 则表示开头 #", "words = [\"a\", \"aa\", \"aaa\", \"aaaa\", \"aaaaa\", \"aaaaaa\", # \"aaaaaaa\",", "# words = [\"a\", \"aa\", \"aaa\", \"aaaa\", \"aaaaa\", \"aaaaaa\", #", "set(s) set2 = set(''.join(wordDict)) if not set1.issubset(set2): return [] #", "str, wordDict: List[str]) -> List[str]: # 做一个快速的检查,如果 s 中存在所有 word", "(len(s) + 1) dp[0] = [[]] for i in range(len(s)", "w in wordDict: idx = len(w) + i if idx", "def dfs(s: str, memo={}): if s in memo: return memo[s]", "in dp[i]: dp[idx].append(dic + [w]) if dp[-1] is None: return", "[[]] for i in range(len(s) + 1): # 如果当前子字符串无法分割,则跳过 if", "II.py # https://leetcode.com/problems/word-break-ii/ from typing import List class Solution: def", "# text = \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\" # words = [\"a\", \"aa\", \"aaa\",", "None: continue tmp = s[i:] for w in wordDict: idx", "None: return [] return [' '.join(res) for res in dp[-1]]", "in combos: res.append([w] + combo) memo[s] = res return res", "wordDict: idx = len(w) + i if idx > len(s):", "+ 1): # 如果当前子字符串无法分割,则跳过 if dp[i] is None: continue tmp", "res in dp[-1]] def wordBreak_dfs(self, s: str, wordDict: List[str]) ->", "s 中存在所有 word 都不包含的字母,则直接退出 set1 = set(s) set2 = set(''.join(wordDict))", "if dp[-1] is None: return [] return [' '.join(res) for", "return [' '.join(res) for res in dp[-1]] def wordBreak_dfs(self, s:", "str, memo={}): if s in memo: return memo[s] if len(s)", "\"dog\"])) print(s.wordBreak_dfs('pineapplepenapple', [ \"apple\", \"pen\", \"applepen\", \"pine\", \"pineapple\"])) # text", "\"aaa\", \"aaaa\", \"aaaaa\", \"aaaaaa\", # \"aaaaaaa\", \"aaaaaaaa\", \"aaaaaaaaa\", \"aaaaaaaaaa\"] #", "is None: return [] return [' '.join(res) for res in", "continue if tmp.startswith(w): if dp[idx] is None: dp[idx] = []", "dp[0] = [[]] for i in range(len(s) + 1): #", "# 如果是 [None],则表示还没有访问到,或没有办法进行分割 # 如果是 [['a', 'b'], ['ab']] 则表示目前已经有两种方式拼出这个子字符串 dp", "from typing import List class Solution: 
def wordBreak(self, s: str,", "dfs(tmp, memo) for combo in combos: res.append([w] + combo) memo[s]", "word 都不包含的字母,则直接退出 set1 = set(s) set2 = set(''.join(wordDict)) if not", "[] return [' '.join(res) for res in dp[-1]] def wordBreak_dfs(self,", "= s[len(w):] combos = dfs(tmp, memo) for combo in combos:", "# 如果是 [['a', 'b'], ['ab']] 则表示目前已经有两种方式拼出这个子字符串 dp = [None] *", "return [] return [' '.join(res) for res in dp[-1]] def", "def wordBreak(self, s: str, wordDict: List[str]) -> List[str]: # 做一个快速的检查,如果", "+ combo) memo[s] = res return res return dfs(s) s", "# 将目前的所有方式全部添加到新的位置,并在每个的最后追加当前的单词 for dic in dp[i]: dp[idx].append(dic + [w]) if", "= set(''.join(wordDict)) if not set1.issubset(set2): return [] # dp[i] 的意思是,子字符串", "i if idx > len(s): continue if tmp.startswith(w): if dp[idx]", "text = \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\" # words = [\"a\", \"aa\", \"aaa\", \"aaaa\",", "Word Break II.py # https://leetcode.com/problems/word-break-ii/ from typing import List class", "memo={}): if s in memo: return memo[s] if len(s) ==", "for i in range(len(s) + 1): # 如果当前子字符串无法分割,则跳过 if dp[i]", "idx = len(w) + i if idx > len(s): continue", "in wordDict: idx = len(w) + i if idx >", "[w]) if dp[-1] is None: return [] return [' '.join(res)", "i in range(len(s) + 1): # 如果当前子字符串无法分割,则跳过 if dp[i] is", "List[str]) -> List[str]: # 做一个快速的检查,如果 s 中存在所有 word 都不包含的字母,则直接退出 set1", "List class Solution: def wordBreak(self, s: str, wordDict: List[str]) ->", "s = Solution() print(s.wordBreak_dfs('catsanddog', [\"cat\", \"cats\", \"and\", \"sand\", \"dog\"])) print(s.wordBreak_dfs('pineapplepenapple',", "'.join(res) for res in dp[-1]] def wordBreak_dfs(self, s: str, wordDict:", "+ i if idx > len(s): continue if tmp.startswith(w): if", "tmp.startswith(w): if dp[idx] is None: dp[idx] = [] # 将目前的所有方式全部添加到新的位置,并在每个的最后追加当前的单词", "+ 1) dp[0] = [[]] for i in range(len(s) +", "= res return res return dfs(s) s = Solution() print(s.wordBreak_dfs('catsanddog',", "\"apple\", \"pen\", \"applepen\", \"pine\", \"pineapple\"])) # text = \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\" #", "wordDict: List[str]) -> List[str]: # 做一个快速的检查,如果 s 中存在所有 word 都不包含的字母,则直接退出", "将目前的所有方式全部添加到新的位置,并在每个的最后追加当前的单词 for dic in dp[i]: dp[idx].append(dic + [w]) if dp[-1]", "len(s): continue if tmp.startswith(w): if dp[idx] is None: dp[idx] =", "\"aaaaa\", \"aaaaaa\", # \"aaaaaaa\", \"aaaaaaaa\", \"aaaaaaaaa\", \"aaaaaaaaaa\"] # print(s.wordBreak(text, words))", "则表示目前已经有两种方式拼出这个子字符串 dp = [None] * (len(s) + 1) dp[0] =", "能以怎样的方式进行分割 # 如果是 [[]] 则表示开头 # 如果是 [None],则表示还没有访问到,或没有办法进行分割 # 如果是", "dp[i] is None: continue tmp = s[i:] for w in", "s: str, wordDict: List[str]) -> List[str]: def dfs(s: str, memo={}):", "is None: dp[idx] = [] # 将目前的所有方式全部添加到新的位置,并在每个的最后追加当前的单词 for dic in", "in memo: return memo[s] if len(s) == 0: return [[]]", "> len(s): continue if tmp.startswith(w): if dp[idx] is None: dp[idx]", "if idx > len(s): continue if tmp.startswith(w): if dp[idx] is", "= len(w) + i if idx > len(s): continue if", "[] # dp[i] 的意思是,子字符串 s[:i] 能以怎样的方式进行分割 # 如果是 [[]] 则表示开头", "in dp[-1]] def wordBreak_dfs(self, s: str, wordDict: List[str]) -> List[str]:", "wordBreak(self, s: str, wordDict: List[str]) -> List[str]: # 做一个快速的检查,如果 s", "dp[idx] = [] # 将目前的所有方式全部添加到新的位置,并在每个的最后追加当前的单词 for dic in dp[i]: dp[idx].append(dic", "= [] for w in wordDict: 
if s.startswith(w): tmp =", "dp[idx].append(dic + [w]) if dp[-1] is None: return [] return", "\"pine\", \"pineapple\"])) # text = \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\" # words = [\"a\",", "如果是 [None],则表示还没有访问到,或没有办法进行分割 # 如果是 [['a', 'b'], ['ab']] 则表示目前已经有两种方式拼出这个子字符串 dp =", "Solution() print(s.wordBreak_dfs('catsanddog', [\"cat\", \"cats\", \"and\", \"sand\", \"dog\"])) print(s.wordBreak_dfs('pineapplepenapple', [ \"apple\",", "如果当前子字符串无法分割,则跳过 if dp[i] is None: continue tmp = s[i:] for", "len(s) == 0: return [[]] res = [] for w", "['ab']] 则表示目前已经有两种方式拼出这个子字符串 dp = [None] * (len(s) + 1) dp[0]", "* (len(s) + 1) dp[0] = [[]] for i in", "for res in dp[-1]] def wordBreak_dfs(self, s: str, wordDict: List[str])", "-> List[str]: # 做一个快速的检查,如果 s 中存在所有 word 都不包含的字母,则直接退出 set1 =", "return res return dfs(s) s = Solution() print(s.wordBreak_dfs('catsanddog', [\"cat\", \"cats\",", "List[str]: def dfs(s: str, memo={}): if s in memo: return", "= [] # 将目前的所有方式全部添加到新的位置,并在每个的最后追加当前的单词 for dic in dp[i]: dp[idx].append(dic +", "dic in dp[i]: dp[idx].append(dic + [w]) if dp[-1] is None:", "\"sand\", \"dog\"])) print(s.wordBreak_dfs('pineapplepenapple', [ \"apple\", \"pen\", \"applepen\", \"pine\", \"pineapple\"])) #", "typing import List class Solution: def wordBreak(self, s: str, wordDict:", "[] for w in wordDict: if s.startswith(w): tmp = s[len(w):]", "if len(s) == 0: return [[]] res = [] for", "range(len(s) + 1): # 如果当前子字符串无法分割,则跳过 if dp[i] is None: continue", "res return dfs(s) s = Solution() print(s.wordBreak_dfs('catsanddog', [\"cat\", \"cats\", \"and\",", "= Solution() print(s.wordBreak_dfs('catsanddog', [\"cat\", \"cats\", \"and\", \"sand\", \"dog\"])) print(s.wordBreak_dfs('pineapplepenapple', [", "s[len(w):] combos = dfs(tmp, memo) for combo in combos: res.append([w]", "if not set1.issubset(set2): return [] # dp[i] 的意思是,子字符串 s[:i] 能以怎样的方式进行分割", "[[]] res = [] for w in wordDict: if s.startswith(w):", "如果是 [['a', 'b'], ['ab']] 则表示目前已经有两种方式拼出这个子字符串 dp = [None] * (len(s)", "# 做一个快速的检查,如果 s 中存在所有 word 都不包含的字母,则直接退出 set1 = set(s) set2", "w in wordDict: if s.startswith(w): tmp = s[len(w):] combos =", "-> List[str]: def dfs(s: str, memo={}): if s in memo:", "= [None] * (len(s) + 1) dp[0] = [[]] for", "[None],则表示还没有访问到,或没有办法进行分割 # 如果是 [['a', 'b'], ['ab']] 则表示目前已经有两种方式拼出这个子字符串 dp = [None]", "Break II.py # https://leetcode.com/problems/word-break-ii/ from typing import List class Solution:", "class Solution: def wordBreak(self, s: str, wordDict: List[str]) -> List[str]:", "= [[]] for i in range(len(s) + 1): # 如果当前子字符串无法分割,则跳过", "0: return [[]] res = [] for w in wordDict:", "memo: return memo[s] if len(s) == 0: return [[]] res", "\"applepen\", \"pine\", \"pineapple\"])) # text = \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\" # words =", "continue tmp = s[i:] for w in wordDict: idx =" ]
[ "update.call_count) update.reset_mock() for kwarg in expected_kwargs: kwarg['head'] = 'contract@head' self._main_test_helper(", "sa.Column('organization_id', sa.Integer()) ), alembic_ops.CreateForeignKeyOp( 'org_fk', 'user', 'organization', ['organization_id'], ['id'] ),", "'contract@head' self._main_test_helper( ['prog', 'revision', '-m', 'message', '--contract'], 'revision', expected_kwargs )", "mock_inspector def test_schema_has_table(self): self._prepare_mocked_sqlalchemy_inspector() self.assertTrue(migration.schema_has_table('foo')) def test_schema_has_table_raises_if_offline(self): self.mock_alembic_is_offline.return_value = True", "'+2', '--delta', '3']) def _test_validate_head_files_helper(self, heads, contract_head='', expand_head=''): fake_config =", "'user', sa.Column( 'organization_name', sa.String(50), nullable=True) ), alembic_ops.CreateUniqueConstraintOp( 'uq_user_org', 'user', ['user_name',", "import textwrap from alembic.autogenerate import api as alembic_ag_api from alembic", "'foo_column')) def test_schema_has_column_raises_if_offline(self): self.mock_alembic_is_offline.return_value = True self.assertRaises(RuntimeError, migration.schema_has_column, 'foo', 'foo_col')", "+ e_revs self._main_test_helper( ['prog', '--subproject', 'neutron', 'upgrade', 'liberty'], 'upgrade', [{'desc':", "self.assertRaises( SystemExit, cli._compare_labels, fake_revision, {'label1', 'label2'}) @mock.patch.object(cli, '_compare_labels') def test__validate_single_revision_labels_branchless_fail_different_labels(", "'user', 'organization', ['organization_id'], ['id'] ), alembic_ops.DropConstraintOp( 'user', 'uq_user_org' ), alembic_ops.DropColumnOp(", "in compliance with the License. You may obtain # a", "head_files_not_exist: cli.validate_head_files(fake_config) self.assertTrue(self.mock_alembic_warn.called) else: self.assertRaises( SystemExit, cli.validate_head_files, fake_config ) self.assertTrue(self.mock_alembic_err.called)", "= (contract_head == expand_head == '') with mock.patch('alembic.script.ScriptDirectory.from_config') as fc,\\", "test_get_subproject_base_not_installed(self): self.assertRaises( SystemExit, cli._get_subproject_base, 'not-installed') def test__compare_labels_ok(self): labels = {'label1',", "generated by Alembic - please adjust! 
### op.drop_constraint('user', 'uq_user_org', type_=None)", "heads[1]: FakeRevision(labels=cli.EXPAND_BRANCH)} fc.return_value.get_revision.side_effect = revs.__getitem__ cli.update_head_files(self.configs[0]) mock_open_con.return_value.write.assert_called_with( heads[0] + '\\n')", "test_schema_has_column_missing_column(self): self._prepare_mocked_sqlalchemy_inspector() self.assertFalse(migration.schema_has_column( 'foo', column_name='meh')) class TestCli(base.BaseTestCase): def setUp(self): super(TestCli,", "= {'label1', 'label2', 'label3'} fake_revision = FakeRevision(labels) self.assertRaises( SystemExit, cli._compare_labels,", "''' def _setUp(self): self.ep_backup = {} for proj, ep in", "[mock.call(mock.ANY, func_name, **_append_version_path(kwargs)) for kwargs in exp_kwargs] ) def test_stamp(self):", "'neutron-fwaas') ini = os.path.join(os.path.dirname(cli.__file__), 'alembic.ini') self.useFixture(MigrationEntrypointsMemento()) migration_cli.migration_entrypoints = {} for", "validate_mock): script_dir = mock.Mock() fake_revision = FakeRevision() cli._validate_revision(script_dir, fake_revision) validate_mock.assert_called_with(script_dir,", "mock.patch( 'sqlalchemy.engine.reflection.Inspector').start() def _prepare_mocked_sqlalchemy_inspector(self): mock_inspector = mock.MagicMock() mock_inspector.get_table_names.return_value = ['foo',", "as alembic_ag_api from alembic import config as alembic_config from alembic.operations", "versions_dir = cli._get_root_versions_dir(config) self.assertEqual('/fake/dir/a/a/b/c/d/versions', versions_dir) def test_get_subproject_script_location(self): foo_ep = cli._get_subproject_script_location('networking-foo')", "FakeRevision(object): path = 'fakepath' def __init__(self, labels=None, down_revision=None, is_branch_point=False): if", "def test_get_subproject_base_not_installed(self): self.assertRaises( SystemExit, cli._get_subproject_base, 'not-installed') def test__compare_labels_ok(self): labels =", "under the License. import copy import os import re import", ") def _validate_cmd(self, cmd): self._main_test_helper( ['prog', cmd], cmd, [{'verbose': False}])", "= True fc.return_value.get_heads.return_value = heads revs = {heads[0]: FakeRevision(labels=cli.CONTRACT_BRANCH), heads[1]:", "Reserved. 
# # Licensed under the Apache License, Version 2.0", "= fake_down_revision cli._validate_single_revision_labels( script_dir, fake_revision, label='fakebranch') expected_labels = {'fakebranch'} compare_mock.assert_has_calls(", "fake directories mock.patch('oslo_utils.fileutils.ensure_tree').start() # Set up some configs and entrypoints", "m = cli._find_milestone_revisions(self.configs[0], 'mitaka') self.assertEqual(0, len(m)) @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test__find_milestone_revisions_branchless(self, walk_mock):", "def assert_command_fails(self, command): # Avoid cluttering stdout with argparse error", "alembic 0.8.9 added additional leading '# ' before comments return", "self._main_test_helper( ['prog', cmd], cmd, [{'verbose': False}]) self._main_test_helper( ['prog', cmd, '--verbose'],", "'autogenerate': False, 'head': cli._get_branch_head(branch) } for branch in cli.MIGRATION_BRANCHES] for", "= alembic_ops.MigrationScript( 'eced083f5df', # these directives will be split into", "'sql': False, 'autogenerate': True, }] self._main_test_helper( ['prog', 'revision', '--autogenerate', '-m',", "def test_schema_has_table(self): self._prepare_mocked_sqlalchemy_inspector() self.assertTrue(migration.schema_has_table('foo')) def test_schema_has_table_raises_if_offline(self): self.mock_alembic_is_offline.return_value = True self.assertRaises(RuntimeError,", "else: os_mock.return_value = True fc.return_value.get_heads.return_value = heads revs = {heads[0]:", "revisions = [FakeRevision() for r in range(5)] revisions[2].module.neutron_milestone = [migration.LIBERTY]", "with testlib_api.ExpectedException(SystemExit): self._main_test_helper( ['prog', 'upgrade', '--expand --contract'], 'upgrade') def _test_upgrade_conflicts_with_revision(self,", "cli.MIGRATION_BRANCHES[0] fake_revision.path = os.path.join('/fake/path', branch) cli._validate_revision(script_dir, fake_revision) validate_mock.assert_called_with( script_dir, fake_revision,", "['prog', 'upgrade', '--%s +3' % mode], 'upgrade') def _test_revision_autogenerate_conflicts_with_branch(self, branch):", "@mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test__find_milestone_revisions_two_branches(self, walk_mock): c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in", "self.assert_command_fails(['prog', 'upgrade', '--delta', '-2']) def test_upgrade_rejects_delta_with_relative_revision(self): self.assert_command_fails(['prog', 'upgrade', '+2', '--delta',", "self.mock_alembic_is_offline.return_value = True self.assertRaises(RuntimeError, migration.schema_has_column, 'foo', 'foo_col') def test_schema_has_column_missing_column(self): self._prepare_mocked_sqlalchemy_inspector()", "expected_labels) for revision in (fake_revision, fake_down_revision)] ) @mock.patch.object(cli, '_validate_single_revision_labels') def", "def test_schema_has_column(self): self._prepare_mocked_sqlalchemy_inspector() self.assertTrue(migration.schema_has_column('foo', 'foo_column')) def test_schema_has_column_raises_if_offline(self): self.mock_alembic_is_offline.return_value = True", "[{'verbose': False}]) self._main_test_helper( ['prog', cmd, '--verbose'], cmd, [{'verbose': True}]) def", "sa.PrimaryKeyConstraint('id') ) op.add_column('user', \"\"\" \"\"\"sa.Column('organization_id', sa.Integer(), nullable=True)) op.create_foreign_key('org_fk', 'user', \"\"\"", "from neutron.db import migration from neutron.db.migration import autogen from neutron.db.migration", "set() compare_mock.assert_has_calls( [mock.call(revision, 
expected_labels) for revision in (fake_revision, fake_down_revision)] )", "self._main_test_helper( ['prog', 'revision', '--autogenerate', '--%s' % branch], 'revision') def test_revision_autogenerate_conflicts_with_expand(self):", "adjust! ### op.drop_constraint('user', 'uq_user_org', type_=None) op.drop_column('user', 'organization_name') ### end Alembic", "import api as alembic_ag_api from alembic import config as alembic_config", "into separate # expand/contract scripts alembic_ops.UpgradeOps( ops=[ alembic_ops.CreateTableOp( 'organization', [", "2012 New Dream Network, LLC (DreamHost) # All Rights Reserved.", "'--delta', '-2']) def test_upgrade_rejects_delta_with_relative_revision(self): self.assert_command_fails(['prog', 'upgrade', '+2', '--delta', '3']) def", "= False kwarg['sql'] = True self._main_test_helper( ['prog', 'revision', '--sql', '-m',", "'organization', [ sa.Column('id', sa.Integer(), primary_key=True), sa.Column('name', sa.String(50), nullable=False) ] ),", "def test_database_sync_revision(self): self._test_database_sync_revision() def test_database_sync_revision_no_branches(self): # Test that old branchless", "self.assert_command_fails(['prog', 'downgrade', '--sql', 'juno']) def test_upgrade_negative_relative_revision_fails(self): self.assert_command_fails(['prog', 'upgrade', '-2']) def", "False, 'head': 'expand@head' }] self._main_test_helper( ['prog', 'revision', '-m', 'message', '--expand'],", "branch], 'revision') def test_revision_autogenerate_conflicts_with_expand(self): self._test_revision_autogenerate_conflicts_with_branch( cli.EXPAND_BRANCH) def test_revision_autogenerate_conflicts_with_contract(self): self._test_revision_autogenerate_conflicts_with_branch( cli.CONTRACT_BRANCH)", "Dream Network, LLC (DreamHost) # All Rights Reserved. 
# #", "fake_config ) self.assertTrue(self.mock_alembic_err.called) if contract_head in heads and expand_head in", "fake_down_revision)] ) @mock.patch.object(cli, '_validate_single_revision_labels') def test__validate_revision_validates_branches(self, validate_mock): script_dir = mock.Mock()", "expand_head + '\\n')).mock_open if contract_head in heads and expand_head in", "to in writing, software # distributed under the License is", "for kwarg in expected_kwargs: kwarg['head'] = 'contract@head' self._main_test_helper( ['prog', 'revision',", "'liberty', cli.EXPAND_BRANCH) self.assertEqual(0, len(m)) @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test__find_milestone_revisions_two_branches(self, walk_mock): c_revs =", ") self.assertTrue(self.mock_alembic_err.called) if contract_head in heads and expand_head in heads:", "test_branches(self): self._validate_cmd('branches') def test_current(self): self._validate_cmd('current') def test_history(self): self._validate_cmd('history') def test_heads(self):", "or agreed to in writing, software # distributed under the", "from neutron.tests.unit import testlib_api class FakeConfig(object): service = '' class", "test__find_milestone_revisions_one_branch(self, walk_mock): c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in range(5)] c_revs[1].module.neutron_milestone", "('alembic_migrations',) script_location = ':'.join([module_name, attrs[0]]) config.set_main_option('script_location', script_location) self.configs.append(config) entrypoint =", "if not head_files_not_exist: fc.assert_called_once_with(fake_config) def test_validate_head_files_success(self): self._test_validate_head_files_helper(['a', 'b'], contract_head='a', expand_head='b')", "migration entrypoints map so it can be restored during test", "'' class FakeRevision(object): path = 'fakepath' def __init__(self, labels=None, down_revision=None,", "+ e_revs m = cli._find_milestone_revisions(self.configs[0], 'liberty') self.assertEqual(2, len(m)) m =", "test_database_sync_revision_no_branches(self): # Test that old branchless approach is still supported", "'head': 'expand@head' }] self._main_test_helper( ['prog', 'revision', '-m', 'message', '--expand'], 'revision',", "self.addCleanup(self.restore) def restore(self): migration_cli.migration_entrypoints = self.ep_backup class TestDbMigration(base.BaseTestCase): def setUp(self):", "Apache License, Version 2.0 (the \"License\"); you may # not", "@mock.patch.object(cli, '_compare_labels') def test__validate_single_revision_labels_branches_fail_different_labels( self, compare_mock): fake_down_revision = FakeRevision() fake_revision", "All Rights Reserved. 
# # Licensed under the Apache License,", "__init__(self, labels=None, down_revision=None, is_branch_point=False): if not labels: labels = set()", "self._main_test_helper( ['prog', 'stamp', 'foo', '--sql'], 'stamp', [{'revision': 'foo', 'sql': True}]", "update.reset_mock() for kwarg in expected_kwargs: kwarg['head'] = 'contract@head' self._main_test_helper( ['prog',", "added additional leading '# ' before comments return s.replace('\\\\#\\\\#\\\\#\\\\ ',", "mock_root = mock.patch.object(cli, '_get_package_root_dir').start() mock_root.side_effect = mocked_root_dir # Avoid creating", "that old branchless approach is still supported self._test_database_sync_revision(separate_branches=False) def test_upgrade_revision(self):", "in heads: mock_open_ex.assert_called_with( cli._get_expand_head_file_path(fake_config)) mock_open_con.assert_called_with( cli._get_contract_head_file_path(fake_config)) if not head_files_not_exist: fc.assert_called_once_with(fake_config)", "self.assertThat( alembic_ag_api.render_python_code(contract.upgrade_ops), matchers.MatchesRegex(_get_regex(expected_regex))) @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test__find_milestone_revisions_one_branch(self, walk_mock): c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH})", "License, Version 2.0 (the \"License\"); you may # not use", "SystemExit, cli.validate_revisions, self.configs[0]) @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test__get_branch_points(self, walk_mock): revisions = [FakeRevision(is_branch_point=tools.get_random_boolean)", "class FakeRevision(object): path = 'fakepath' def __init__(self, labels=None, down_revision=None, is_branch_point=False):", "= copy.copy(ep) self.addCleanup(self.restore) def restore(self): migration_cli.migration_entrypoints = self.ep_backup class TestDbMigration(base.BaseTestCase):", "not use this file except in compliance with the License.", "mock.patch.object(cli, 'update_head_files') as update: if separate_branches: mock.patch('os.path.exists').start() expected_kwargs = [{", "mock.patch.object(sys, 'argv', argv),\\ mock.patch.object(cli, 'run_sanity_checks'),\\ mock.patch.object(cli, 'validate_revisions'): cli.main() def _append_version_path(args):", "configs and entrypoints for tests to chew on self.configs =", "'a.b.c:d') proj_base = cli._get_project_base(config) self.assertEqual('a', proj_base) def test_get_root_versions_dir(self): config =", "['foo', 'bar'] mock_inspector.get_columns.return_value = [{'name': 'foo_column'}, {'name': 'bar_column'}] self.mock_sa_inspector.from_engine.return_value =", "= [FakeRevision(is_branch_point=True) for i in range(2)] walk_mock.return_value = revisions self.assertRaises(", "'expand' else \"/foo/contract\") migration_script = alembic_ops.MigrationScript( 'eced083f5df', # these directives", "def test_get_subproject_script_location_not_installed(self): self.assertRaises( SystemExit, cli._get_subproject_script_location, 'not-installed') def test_get_subproject_base_not_installed(self): self.assertRaises( SystemExit,", "labels self.down_revision = down_revision self.is_branch_point = is_branch_point self.revision = helpers.get_random_string(10)", "### commands auto generated by Alembic - please adjust! 
###", "directives[0] contract = directives[1] self.assertEqual(\"/foo/expand\", expand.version_path) self.assertEqual(\"/foo/contract\", contract.version_path) self.assertTrue(expand.downgrade_ops.is_empty()) self.assertTrue(contract.downgrade_ops.is_empty())", "}] self._main_test_helper( ['prog', 'revision', '-m', 'message', '--expand'], 'revision', expected_kwargs )", "self._test_revision_autogenerate_conflicts_with_branch( cli.CONTRACT_BRANCH) def test_upgrade_expand_conflicts_with_revision(self): self._test_upgrade_conflicts_with_revision('expand') def test_upgrade_contract_conflicts_with_revision(self): self._test_upgrade_conflicts_with_revision('contract') def test_upgrade_expand_conflicts_with_delta(self):", "testlib_api class FakeConfig(object): service = '' class FakeRevision(object): path =", "def test_update_head_files_success(self, *mocks): heads = ['a', 'b'] mock_open_con = self.useFixture(", "'kilo', '--delta', '3'], 'upgrade', [{'desc': None, 'revision': 'kilo+3', 'sql': False}]", "'kilo+3', 'sql': False}] ) def test_upgrade_expand(self): self._main_test_helper( ['prog', 'upgrade', '--expand'],", "import tools from neutron.tests.unit import testlib_api class FakeConfig(object): service =", "release, branch: ( \"/foo/expand\" if branch == 'expand' else \"/foo/contract\")", "cli.validate_head_files, fake_config ) self.assertTrue(self.mock_alembic_err.called) if contract_head in heads and expand_head", "walk_mock.return_value = c_revs + e_revs m = cli._find_milestone_revisions(self.configs[0], 'liberty') self.assertEqual(2,", "for tests to chew on self.configs = [] self.projects =", "adjust! ### op.create_table('organization', sa.Column('id', sa.Integer(), nullable=False), sa.Column('name', sa.String(length=50), nullable=False), sa.PrimaryKeyConstraint('id')", "def test_validate_revisions_fails_on_multiple_branch_points( self, walk_mock, validate_mock): revisions = [FakeRevision(is_branch_point=True) for i", "else: self.assertRaises( SystemExit, cli.validate_head_files, fake_config ) self.assertTrue(self.mock_alembic_err.called) if contract_head in", "expand/contract scripts alembic_ops.UpgradeOps( ops=[ alembic_ops.CreateTableOp( 'organization', [ sa.Column('id', sa.Integer(), primary_key=True),", "map so it can be restored during test cleanup. 
'''", "= {'fakebranch'} compare_mock.assert_has_calls( [mock.call(revision, expected_labels) for revision in (fake_revision, fake_down_revision)]", "path = 'fakepath' def __init__(self, labels=None, down_revision=None, is_branch_point=False): if not", "from neutron.tests import base from neutron.tests import tools from neutron.tests.unit", "'-m', 'message', '--contract'], 'revision', expected_kwargs ) self.assertEqual(len(self.projects), update.call_count) def test_database_sync_revision(self):", "License is distributed on an \"AS IS\" BASIS, WITHOUT #", "self._validate_cmd('branches') def test_current(self): self._validate_cmd('current') def test_history(self): self._validate_cmd('history') def test_heads(self): self._validate_cmd('heads')", "config = alembic_config.Config(ini) config.set_main_option('neutron_project', project) module_name = project.replace('-', '_') +", "'revision', expected_kwargs ) self.assertEqual(len(self.projects), update.call_count) update.reset_mock() for kwarg in expected_kwargs:", "super(TestCli, self).setUp() self.do_alembic_cmd_p = mock.patch.object(cli, 'do_alembic_command') self.do_alembic_cmd = self.do_alembic_cmd_p.start() self.mock_alembic_err", "self.assertEqual('a', proj_base) def test_get_root_versions_dir(self): config = alembic_config.Config() config.set_main_option('script_location', 'a.b.c:d') versions_dir", "\"License\"); you may # not use this file except in", "it can be restored during test cleanup. ''' def _setUp(self):", "test cleanup. ''' def _setUp(self): self.ep_backup = {} for proj,", "TestCli(base.BaseTestCase): def setUp(self): super(TestCli, self).setUp() self.do_alembic_cmd_p = mock.patch.object(cli, 'do_alembic_command') self.do_alembic_cmd", "['id'] ), alembic_ops.DropConstraintOp( 'user', 'uq_user_org' ), alembic_ops.DropColumnOp( 'user', 'organization_name' )", "'revision': 'kilo+3', 'sql': False}] ) def test_upgrade_expand(self): self._main_test_helper( ['prog', 'upgrade',", "mocked_root_dir # Avoid creating fake directories mock.patch('oslo_utils.fileutils.ensure_tree').start() # Set up", "= revisions cli.validate_revisions(self.configs[0]) validate_mock.assert_has_calls( [mock.call(mock.ANY, revision) for revision in revisions]", "tools.OpenFixture(cli._get_expand_head_file_path( fake_config), expand_head + '\\n')).mock_open if contract_head in heads and", "'sql': False}, {'desc': cli.CONTRACT_BRANCH, 'revision': c_revs[1].revision, 'sql': False}] ) def", "matchers.MatchesRegex(_get_regex(expected_regex))) @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test__find_milestone_revisions_one_branch(self, walk_mock): c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r", "[FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in range(5)] c_revs[1].module.neutron_milestone = [migration.LIBERTY] walk_mock.return_value =", "cli._get_project_base(config) self.assertEqual('a', proj_base) def test_get_root_versions_dir(self): config = alembic_config.Config() config.set_main_option('script_location', 'a.b.c:d')", "head_files_not_exist: os_mock.return_value = False else: os_mock.return_value = True fc.return_value.get_heads.return_value =", "= {heads[0]: FakeRevision(labels=cli.CONTRACT_BRANCH), heads[1]: FakeRevision(labels=cli.EXPAND_BRANCH)} fc.return_value.get_revision.side_effect = revs.__getitem__ cli.update_head_files(self.configs[0]) mock_open_con.return_value.write.assert_called_with(", "'liberty'], 'upgrade', [{'desc': cli.EXPAND_BRANCH, 'revision': e_revs[3].revision, 'sql': False}, {'desc': 
cli.CONTRACT_BRANCH,", "{'label1', 'label2', 'label3'} fake_revision = FakeRevision(labels) self.assertRaises( SystemExit, cli._compare_labels, fake_revision,", "labels = {'label1', 'label2'} fake_revision = FakeRevision(labels) cli._compare_labels(fake_revision, {'label1', 'label2'})", "test__compare_labels_fail_unexpected_labels(self): labels = {'label1', 'label2', 'label3'} fake_revision = FakeRevision(labels) self.assertRaises(", "= cli._get_heads_file_path( self.configs[0]) delete_if_exists = mocks[0] self.assertIn(mock.call(old_head_file), delete_if_exists.call_args_list) self.assertIn(mock.call(old_heads_file), delete_if_exists.call_args_list)", "expand.version_path) self.assertEqual(\"/foo/contract\", contract.version_path) self.assertTrue(expand.downgrade_ops.is_empty()) self.assertTrue(contract.downgrade_ops.is_empty()) def _get_regex(s): s = textwrap.dedent(s)", "alembic_ops.DropConstraintOp('org_fk', 'user'), alembic_ops.DropColumnOp('user', 'organization_id') ] ), alembic_ops.DropTableOp('organization') ] ), message='create", "heads: cli.validate_head_files(fake_config) elif head_files_not_exist: cli.validate_head_files(fake_config) self.assertTrue(self.mock_alembic_warn.called) else: self.assertRaises( SystemExit, cli.validate_head_files,", "self.assertEqual(len(self.projects), update.call_count) update.reset_mock() for kwarg in expected_kwargs: kwarg['head'] = 'contract@head'", "FakeConfig(object): service = '' class FakeRevision(object): path = 'fakepath' def", "'org_fk', 'user', 'organization', ['organization_id'], ['id'] ), alembic_ops.DropConstraintOp( 'user', 'uq_user_org' ),", "with argparse error messages mock.patch('argparse.ArgumentParser._print_message').start() with mock.patch.object(sys, 'argv', command), mock.patch.object(", "self.assert_command_fails(['prog', 'upgrade', '-2']) def test_upgrade_negative_delta_fails(self): self.assert_command_fails(['prog', 'upgrade', '--delta', '-2']) def", "sa.String(length=50), nullable=False), sa.PrimaryKeyConstraint('id') ) op.add_column('user', \"\"\" \"\"\"sa.Column('organization_id', sa.Integer(), nullable=True)) op.create_foreign_key('org_fk',", ") def test_upgrade_revision_delta(self): self._main_test_helper( ['prog', 'upgrade', 'kilo', '--delta', '3'], 'upgrade',", "script_location = ':'.join([module_name, attrs[0]]) config.set_main_option('script_location', script_location) self.configs.append(config) entrypoint = pkg_resources.EntryPoint(project,", "c_revs + e_revs m = cli._find_milestone_revisions(self.configs[0], 'liberty') self.assertEqual(2, len(m)) m", "ops as alembic_ops from alembic import script as alembic_script import", "config = alembic_config.Config() config.set_main_option('script_location', 'a.b.c:d') versions_dir = cli._get_root_versions_dir(config) self.assertEqual('/fake/dir/a/a/b/c/d/versions', versions_dir)", "'label3'} fake_revision = FakeRevision(labels) self.assertRaises( SystemExit, cli._compare_labels, fake_revision, {'label1', 'label2'})", "@mock.patch.object(cli, '_validate_revision') @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test_validate_revisions_fails_on_multiple_branch_points( self, walk_mock, validate_mock): revisions =", "mock.Mock() script_dir.get_revision.return_value = fake_down_revision cli._validate_single_revision_labels(script_dir, fake_revision, label=None) expected_labels = set()", "'') with mock.patch('alembic.script.ScriptDirectory.from_config') as fc,\\ mock.patch('os.path.exists') as os_mock: if head_files_not_exist:", "module_name = 
project.replace('-', '_') + '.db.migration' attrs = ('alembic_migrations',) script_location", "kwargs in exp_kwargs] ) def test_stamp(self): self._main_test_helper( ['prog', 'stamp', 'foo'],", "{'fakebranch'} compare_mock.assert_has_calls( [mock.call(revision, expected_labels) for revision in (fake_revision, fake_down_revision)] )", "'user'), alembic_ops.DropColumnOp('user', 'organization_id') ] ), alembic_ops.DropTableOp('organization') ] ), message='create the", "distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR", "'--delta', '3'], 'upgrade', [{'desc': None, 'revision': 'kilo+3', 'sql': False}] )", "column_name='meh')) class TestCli(base.BaseTestCase): def setUp(self): super(TestCli, self).setUp() self.do_alembic_cmd_p = mock.patch.object(cli,", "entrypoint = pkg_resources.EntryPoint(project, module_name, attrs=attrs) migration_cli.migration_entrypoints[project] = entrypoint def _main_test_helper(self,", "m = cli._find_milestone_revisions(self.configs[0], 'liberty') self.assertEqual(1, len(m)) m = cli._find_milestone_revisions(self.configs[0], 'mitaka')", "alembic_script.ScriptDirectory.from_config( self.configs[0]) self.assertEqual(set(rev for rev in revisions if rev.is_branch_point), set(cli._get_branch_points(script_dir)))", "'autogenerate': False, 'head': 'expand@head' }] self._main_test_helper( ['prog', 'revision', '-m', 'message',", "self._main_test_helper( ['prog', 'upgrade', '--expand'], 'upgrade', [{'desc': cli.EXPAND_BRANCH, 'revision': 'expand@head', 'sql':", "True, }] self._main_test_helper( ['prog', 'revision', '--autogenerate', '-m', 'message'], 'revision', expected_kwargs", "revision) for revision in revisions] ) @mock.patch.object(cli, '_validate_revision') @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def", "[{ 'message': 'message', 'sql': False, 'autogenerate': False, 'head': 'expand@head' }]", "'message': 'message', 'sql': True, 'autogenerate': False, 'head': cli._get_branch_head(branch) } for", "import helpers from oslo_utils import fileutils import pkg_resources import sqlalchemy", "['prog', 'upgrade', 'kilo', '--delta', '3'], 'upgrade', [{'desc': None, 'revision': 'kilo+3',", "False}] ) def test_upgrade_expand_contract_are_mutually_exclusive(self): with testlib_api.ExpectedException(SystemExit): self._main_test_helper( ['prog', 'upgrade', '--expand", "expand_head='b') def test_validate_head_files_missing_file(self): self._test_validate_head_files_helper(['a', 'b']) def test_validate_head_files_wrong_contents(self): self._test_validate_head_files_helper(['a', 'b'], contract_head='c',", "Version 2.0 (the \"License\"); you may # not use this", "@mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test__find_milestone_revisions_one_branch(self, walk_mock): c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in", "copy of the migration entrypoints map so it can be", "# these directives will be split into separate # expand/contract", "cli.CONTRACT_BRANCH, 'revision': c_revs[1].revision, 'sql': False}] ) def assert_command_fails(self, command): #", "'revision': 'contract@head', 'sql': False}] ) @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test_upgrade_milestone_expand_before_contract(self, walk_mock): c_revs", "alembic_ops.MigrationScript( 'eced083f5df', # these directives will be split into separate", "m = cli._find_milestone_revisions(self.configs[0], 'mitaka') self.assertEqual(0, len(m)) class TestSafetyChecks(base.BaseTestCase): def test_validate_revisions(self,", "i in range(10)] walk_mock.return_value = 
# Copyright 2012 New Dream Network, LLC (DreamHost)
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import copy
import os
import re
import sys
import textwrap

from alembic.autogenerate import api as alembic_ag_api
from alembic import config as alembic_config
from alembic.operations import ops as alembic_ops
from alembic import script as alembic_script
import fixtures
import mock
from neutron_lib.utils import helpers
from oslo_utils import fileutils
import pkg_resources
import sqlalchemy as sa
from testtools import matchers

from neutron.conf.db import migration_cli
from neutron.db import migration
from neutron.db.migration import autogen
from neutron.db.migration import cli
from neutron.tests import base
from neutron.tests import tools
from neutron.tests.unit import testlib_api


class FakeConfig(object):
    service = ''


class FakeRevision(object):
    path = 'fakepath'

    def __init__(self, labels=None, down_revision=None, is_branch_point=False):
        if not labels:
            labels = set()
        self.branch_labels = labels
        self.down_revision = down_revision
        self.is_branch_point = is_branch_point
        self.revision = helpers.get_random_string(10)
        self.module = mock.MagicMock()


class MigrationEntrypointsMemento(fixtures.Fixture):
    '''Create a copy of the migration entrypoints map so it can be restored
       during test cleanup.
    '''

    def _setUp(self):
        self.ep_backup = {}
        for proj, ep in migration_cli.migration_entrypoints.items():
            self.ep_backup[proj] = copy.copy(ep)
        self.addCleanup(self.restore)

    def restore(self):
        migration_cli.migration_entrypoints = self.ep_backup


class TestDbMigration(base.BaseTestCase):

    def setUp(self):
        super(TestDbMigration, self).setUp()
        mock.patch('alembic.op.get_bind').start()
        self.mock_alembic_is_offline = mock.patch(
            'alembic.context.is_offline_mode', return_value=False).start()
        self.mock_alembic_is_offline.return_value = False
        self.mock_sa_inspector = mock.patch(
            'sqlalchemy.engine.reflection.Inspector').start()

    def _prepare_mocked_sqlalchemy_inspector(self):
        mock_inspector = mock.MagicMock()
        mock_inspector.get_table_names.return_value = ['foo', 'bar']
        mock_inspector.get_columns.return_value = [{'name': 'foo_column'},
                                                   {'name': 'bar_column'}]
        self.mock_sa_inspector.from_engine.return_value = mock_inspector

    def test_schema_has_table(self):
        self._prepare_mocked_sqlalchemy_inspector()
        self.assertTrue(migration.schema_has_table('foo'))

    def test_schema_has_table_raises_if_offline(self):
        self.mock_alembic_is_offline.return_value = True
        self.assertRaises(RuntimeError, migration.schema_has_table, 'foo')

    def test_schema_has_column_missing_table(self):
        self._prepare_mocked_sqlalchemy_inspector()
        self.assertFalse(migration.schema_has_column('meh', 'meh'))

    def test_schema_has_column(self):
        self._prepare_mocked_sqlalchemy_inspector()
        self.assertTrue(migration.schema_has_column('foo', 'foo_column'))

    def test_schema_has_column_raises_if_offline(self):
        self.mock_alembic_is_offline.return_value = True
        self.assertRaises(RuntimeError, migration.schema_has_column,
                          'foo', 'foo_col')

    def test_schema_has_column_missing_column(self):
        self._prepare_mocked_sqlalchemy_inspector()
        self.assertFalse(migration.schema_has_column(
            'foo', column_name='meh'))


class TestCli(base.BaseTestCase):
    def setUp(self):
        super(TestCli, self).setUp()
        self.do_alembic_cmd_p = mock.patch.object(cli, 'do_alembic_command')
        self.do_alembic_cmd = self.do_alembic_cmd_p.start()

        self.mock_alembic_err = mock.patch('alembic.util.err').start()
        self.mock_alembic_warn = mock.patch('alembic.util.warn').start()
        self.mock_alembic_err.side_effect = SystemExit

        def mocked_root_dir(cfg):
            return os.path.join('/fake/dir', cli._get_project_base(cfg))
        mock_root = mock.patch.object(cli, '_get_package_root_dir').start()
        mock_root.side_effect = mocked_root_dir
        # Avoid creating fake directories
        mock.patch('oslo_utils.fileutils.ensure_tree').start()

        # Set up some configs and entrypoints for tests to chew on
        self.configs = []
        self.projects = ('neutron', 'networking-foo', 'neutron-fwaas')
        ini = os.path.join(os.path.dirname(cli.__file__), 'alembic.ini')
        self.useFixture(MigrationEntrypointsMemento())
        migration_cli.migration_entrypoints = {}
        for project in self.projects:
            config = alembic_config.Config(ini)
            config.set_main_option('neutron_project', project)
            module_name = project.replace('-', '_') + '.db.migration'
            attrs = ('alembic_migrations',)
            script_location = ':'.join([module_name, attrs[0]])
            config.set_main_option('script_location', script_location)
            self.configs.append(config)
            entrypoint = pkg_resources.EntryPoint(project,
                                                  module_name,
                                                  attrs=attrs)
            migration_cli.migration_entrypoints[project] = entrypoint

    def _main_test_helper(self, argv, func_name, exp_kwargs=[{}]):
        with mock.patch.object(sys, 'argv', argv),\
                mock.patch.object(cli, 'run_sanity_checks'),\
                mock.patch.object(cli, 'validate_revisions'):
            cli.main()

        def _append_version_path(args):
            args = copy.copy(args)
            if 'autogenerate' in args and not args['autogenerate']:
                args['version_path'] = mock.ANY
            return args

        self.do_alembic_cmd.assert_has_calls(
            [mock.call(mock.ANY, func_name, **_append_version_path(kwargs))
             for kwargs in exp_kwargs]
        )

    def test_stamp(self):
        self._main_test_helper(
            ['prog', 'stamp', 'foo'],
            'stamp',
            [{'revision': 'foo', 'sql': False}]
        )

        self._main_test_helper(
            ['prog', 'stamp', 'foo', '--sql'],
            'stamp',
            [{'revision': 'foo', 'sql': True}]
        )

    def _validate_cmd(self, cmd):
        self._main_test_helper(
            ['prog', cmd],
            cmd,
            [{'verbose': False}])

        self._main_test_helper(
            ['prog', cmd, '--verbose'],
            cmd,
            [{'verbose': True}])

    def test_branches(self):
        self._validate_cmd('branches')

    def test_current(self):
        self._validate_cmd('current')

    def test_history(self):
        self._validate_cmd('history')

    def test_heads(self):
        self._validate_cmd('heads')

    def test_check_migration(self):
        with mock.patch.object(cli, 'validate_head_files') as validate:
            self._main_test_helper(['prog', 'check_migration'], 'branches')
            self.assertEqual(len(self.projects), validate.call_count)

    def _test_database_sync_revision(self, separate_branches=True):
        with mock.patch.object(cli, 'update_head_files') as update:
            if separate_branches:
                mock.patch('os.path.exists').start()
            expected_kwargs = [{
                'message': 'message', 'sql': False, 'autogenerate': True,
            }]
            self._main_test_helper(
                ['prog', 'revision', '--autogenerate', '-m', 'message'],
                'revision',
                expected_kwargs
            )
            self.assertEqual(len(self.projects), update.call_count)
            update.reset_mock()

            expected_kwargs = [{
                'message': 'message', 'sql': True, 'autogenerate': False,
                'head': cli._get_branch_head(branch)
            } for branch in cli.MIGRATION_BRANCHES]
            for kwarg in expected_kwargs:
                kwarg['autogenerate'] = False
                kwarg['sql'] = True

            self._main_test_helper(
                ['prog', 'revision', '--sql', '-m', 'message'],
                'revision',
                expected_kwargs
            )
            self.assertEqual(len(self.projects), update.call_count)
            update.reset_mock()

            expected_kwargs = [{
                'message': 'message',
                'sql': False,
                'autogenerate': False,
                'head': 'expand@head'
            }]
            self._main_test_helper(
                ['prog', 'revision', '-m', 'message', '--expand'],
                'revision',
                expected_kwargs
            )
            self.assertEqual(len(self.projects), update.call_count)
            update.reset_mock()

            for kwarg in expected_kwargs:
                kwarg['head'] = 'contract@head'

            self._main_test_helper(
                ['prog', 'revision', '-m', 'message', '--contract'],
                'revision',
                expected_kwargs
            )
            self.assertEqual(len(self.projects), update.call_count)

    def test_database_sync_revision(self):
        self._test_database_sync_revision()

    def test_database_sync_revision_no_branches(self):
        # Test that old branchless approach is still supported
        self._test_database_sync_revision(separate_branches=False)

    def test_upgrade_revision(self):
        self._main_test_helper(
            ['prog', 'upgrade', '--sql', 'head'],
            'upgrade',
            [{'desc': None, 'revision': 'heads', 'sql': True}]
        )

    def test_upgrade_delta(self):
        self._main_test_helper(
            ['prog', 'upgrade', '--delta', '3'],
            'upgrade',
            [{'desc': None, 'revision': '+3', 'sql': False}]
        )

    def test_upgrade_revision_delta(self):
        self._main_test_helper(
            ['prog', 'upgrade', 'kilo', '--delta', '3'],
            'upgrade',
            [{'desc': None, 'revision': 'kilo+3', 'sql': False}]
        )

    def test_upgrade_expand(self):
        self._main_test_helper(
            ['prog', 'upgrade', '--expand'],
            'upgrade',
            [{'desc': cli.EXPAND_BRANCH,
              'revision': 'expand@head',
              'sql': False}]
        )

    def test_upgrade_expand_contract_are_mutually_exclusive(self):
        with testlib_api.ExpectedException(SystemExit):
            self._main_test_helper(
                ['prog', 'upgrade', '--expand --contract'], 'upgrade')

    def _test_upgrade_conflicts_with_revision(self, mode):
        with testlib_api.ExpectedException(SystemExit):
            self._main_test_helper(
                ['prog', 'upgrade', '--%s revision1' % mode], 'upgrade')

    def _test_upgrade_conflicts_with_delta(self, mode):
        with testlib_api.ExpectedException(SystemExit):
            self._main_test_helper(
                ['prog', 'upgrade', '--%s +3' % mode], 'upgrade')

    def _test_revision_autogenerate_conflicts_with_branch(self, branch):
        with testlib_api.ExpectedException(SystemExit):
            self._main_test_helper(
                ['prog', 'revision', '--autogenerate', '--%s' % branch],
                'revision')

    def test_revision_autogenerate_conflicts_with_expand(self):
        self._test_revision_autogenerate_conflicts_with_branch(
            cli.EXPAND_BRANCH)

    def test_revision_autogenerate_conflicts_with_contract(self):
        self._test_revision_autogenerate_conflicts_with_branch(
            cli.CONTRACT_BRANCH)

    def test_upgrade_expand_conflicts_with_revision(self):
        self._test_upgrade_conflicts_with_revision('expand')

    def test_upgrade_contract_conflicts_with_revision(self):
        self._test_upgrade_conflicts_with_revision('contract')

    def test_upgrade_expand_conflicts_with_delta(self):
        self._test_upgrade_conflicts_with_delta('expand')

    def test_upgrade_contract_conflicts_with_delta(self):
        self._test_upgrade_conflicts_with_delta('contract')

    def test_upgrade_contract(self):
        self._main_test_helper(
            ['prog', 'upgrade', '--contract'],
            'upgrade',
            [{'desc': cli.CONTRACT_BRANCH,
              'revision': 'contract@head',
              'sql': False}]
        )

    @mock.patch('alembic.script.ScriptDirectory.walk_revisions')
    def test_upgrade_milestone_expand_before_contract(self, walk_mock):
        c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in range(5)]
        c_revs[1].module.neutron_milestone = [migration.LIBERTY]
        e_revs = [FakeRevision(labels={cli.EXPAND_BRANCH}) for r in range(5)]
        e_revs[3].module.neutron_milestone = [migration.LIBERTY]
        walk_mock.return_value = c_revs + e_revs
        self._main_test_helper(
            ['prog', '--subproject', 'neutron', 'upgrade', 'liberty'],
            'upgrade',
            [{'desc': cli.EXPAND_BRANCH,
              'revision': e_revs[3].revision,
              'sql': False},
             {'desc': cli.CONTRACT_BRANCH,
              'revision': c_revs[1].revision,
              'sql': False}]
        )

    def assert_command_fails(self, command):
        # Avoid cluttering stdout with argparse error messages
        mock.patch('argparse.ArgumentParser._print_message').start()
        with mock.patch.object(sys, 'argv', command), mock.patch.object(
                cli, 'run_sanity_checks'):
            self.assertRaises(SystemExit, cli.main)

    def test_downgrade_fails(self):
        self.assert_command_fails(['prog', 'downgrade', '--sql', 'juno'])

    def test_upgrade_negative_relative_revision_fails(self):
        self.assert_command_fails(['prog', 'upgrade', '-2'])

    def test_upgrade_negative_delta_fails(self):
        self.assert_command_fails(['prog', 'upgrade', '--delta', '-2'])

    def test_upgrade_rejects_delta_with_relative_revision(self):
        self.assert_command_fails(['prog', 'upgrade', '+2', '--delta', '3'])

    def _test_validate_head_files_helper(self, heads, contract_head='',
                                         expand_head=''):
        fake_config = self.configs[0]
        head_files_not_exist = (contract_head == expand_head == '')
        with mock.patch('alembic.script.ScriptDirectory.from_config') as fc,\
                mock.patch('os.path.exists') as os_mock:
            if head_files_not_exist:
                os_mock.return_value = False
            else:
                os_mock.return_value = True

            fc.return_value.get_heads.return_value = heads
            revs = {heads[0]: FakeRevision(labels=cli.CONTRACT_BRANCH),
                    heads[1]: FakeRevision(labels=cli.EXPAND_BRANCH)}
            fc.return_value.get_revision.side_effect = revs.__getitem__
            mock_open_con = self.useFixture(
                tools.OpenFixture(cli._get_contract_head_file_path(
                    fake_config), contract_head + '\n')).mock_open
            mock_open_ex = self.useFixture(
                tools.OpenFixture(cli._get_expand_head_file_path(
                    fake_config), expand_head + '\n')).mock_open

            if contract_head in heads and expand_head in heads:
                cli.validate_head_files(fake_config)
            elif head_files_not_exist:
                cli.validate_head_files(fake_config)
                self.assertTrue(self.mock_alembic_warn.called)
            else:
                self.assertRaises(
                    SystemExit,
                    cli.validate_head_files,
                    fake_config
                )
                self.assertTrue(self.mock_alembic_err.called)

            if contract_head in heads and expand_head in heads:
                mock_open_ex.assert_called_with(
                    cli._get_expand_head_file_path(fake_config))
                mock_open_con.assert_called_with(
                    cli._get_contract_head_file_path(fake_config))

            if not head_files_not_exist:
                fc.assert_called_once_with(fake_config)

    def test_validate_head_files_success(self):
        self._test_validate_head_files_helper(['a', 'b'], contract_head='a',
                                              expand_head='b')

    def test_validate_head_files_missing_file(self):
        self._test_validate_head_files_helper(['a', 'b'])

    def test_validate_head_files_wrong_contents(self):
        self._test_validate_head_files_helper(['a', 'b'], contract_head='c',
                                              expand_head='d')

    @mock.patch.object(fileutils, 'delete_if_exists')
    def test_update_head_files_success(self, *mocks):
        heads = ['a', 'b']
        mock_open_con = self.useFixture(
            tools.OpenFixture(cli._get_contract_head_file_path(
                self.configs[0]))).mock_open
        mock_open_ex = self.useFixture(
            tools.OpenFixture(cli._get_expand_head_file_path(
                self.configs[0]))).mock_open
        with mock.patch('alembic.script.ScriptDirectory.from_config') as fc:
            fc.return_value.get_heads.return_value = heads
            revs = {heads[0]: FakeRevision(labels=cli.CONTRACT_BRANCH),
                    heads[1]: FakeRevision(labels=cli.EXPAND_BRANCH)}
            fc.return_value.get_revision.side_effect = revs.__getitem__
            cli.update_head_files(self.configs[0])
            mock_open_con.return_value.write.assert_called_with(
                heads[0] + '\n')
            mock_open_ex.return_value.write.assert_called_with(heads[1] + '\n')

            old_head_file = cli._get_head_file_path(
                self.configs[0])
            old_heads_file = cli._get_heads_file_path(
                self.configs[0])
            delete_if_exists = mocks[0]
            self.assertIn(mock.call(old_head_file),
                          delete_if_exists.call_args_list)
            self.assertIn(mock.call(old_heads_file),
                          delete_if_exists.call_args_list)

    def test_get_project_base(self):
        config = alembic_config.Config()
        config.set_main_option('script_location', 'a.b.c:d')
        proj_base = cli._get_project_base(config)
        self.assertEqual('a', proj_base)

    def test_get_root_versions_dir(self):
        config = alembic_config.Config()
        config.set_main_option('script_location', 'a.b.c:d')
        versions_dir = cli._get_root_versions_dir(config)
        self.assertEqual('/fake/dir/a/a/b/c/d/versions', versions_dir)

    def test_get_subproject_script_location(self):
        foo_ep = cli._get_subproject_script_location('networking-foo')
        expected = 'networking_foo.db.migration:alembic_migrations'
        self.assertEqual(expected, foo_ep)

    def test_get_subproject_script_location_not_installed(self):
        self.assertRaises(
            SystemExit, cli._get_subproject_script_location, 'not-installed')

    def test_get_subproject_base_not_installed(self):
        self.assertRaises(
            SystemExit, cli._get_subproject_base, 'not-installed')

    def test__compare_labels_ok(self):
        labels = {'label1', 'label2'}
        fake_revision = FakeRevision(labels)
        cli._compare_labels(fake_revision, {'label1', 'label2'})

    def test__compare_labels_fail_unexpected_labels(self):
        labels = {'label1', 'label2', 'label3'}
        fake_revision = FakeRevision(labels)
        self.assertRaises(
            SystemExit,
            cli._compare_labels, fake_revision, {'label1', 'label2'})

    @mock.patch.object(cli, '_compare_labels')
    def test__validate_single_revision_labels_branchless_fail_different_labels(
            self, compare_mock):
        fake_down_revision = FakeRevision()
        fake_revision = FakeRevision(down_revision=fake_down_revision)

        script_dir = mock.Mock()
        script_dir.get_revision.return_value = fake_down_revision
        cli._validate_single_revision_labels(script_dir, fake_revision,
                                             label=None)

        expected_labels = set()
        compare_mock.assert_has_calls(
            [mock.call(revision, expected_labels)
             for revision in (fake_revision, fake_down_revision)]
        )

    @mock.patch.object(cli, '_compare_labels')
    def test__validate_single_revision_labels_branches_fail_different_labels(
            self, compare_mock):
        fake_down_revision = FakeRevision()
        fake_revision = FakeRevision(down_revision=fake_down_revision)

        script_dir = mock.Mock()
        script_dir.get_revision.return_value = fake_down_revision
        cli._validate_single_revision_labels(
            script_dir, fake_revision, label='fakebranch')

        expected_labels = {'fakebranch'}
        compare_mock.assert_has_calls(
            [mock.call(revision, expected_labels)
             for revision in (fake_revision, fake_down_revision)]
        )

    @mock.patch.object(cli, '_validate_single_revision_labels')
    def test__validate_revision_validates_branches(self, validate_mock):
        script_dir = mock.Mock()
        fake_revision = FakeRevision()
        branch = cli.MIGRATION_BRANCHES[0]
        fake_revision.path = os.path.join('/fake/path', branch)
        cli._validate_revision(script_dir, fake_revision)
        validate_mock.assert_called_with(
            script_dir, fake_revision, label=branch)

    @mock.patch.object(cli, '_validate_single_revision_labels')
    def test__validate_revision_validates_branchless_migrations(
            self, validate_mock):
        script_dir = mock.Mock()
        fake_revision = FakeRevision()
        cli._validate_revision(script_dir, fake_revision)
        validate_mock.assert_called_with(script_dir, fake_revision)

    @mock.patch.object(cli, '_validate_revision')
    @mock.patch('alembic.script.ScriptDirectory.walk_revisions')
    def test_validate_revisions_walks_thru_all_revisions(
            self, walk_mock, validate_mock):
        revisions = [FakeRevision() for i in range(10)]
        walk_mock.return_value = revisions
        cli.validate_revisions(self.configs[0])
        validate_mock.assert_has_calls(
            [mock.call(mock.ANY, revision) for revision in revisions]
        )

    @mock.patch.object(cli, '_validate_revision')
    @mock.patch('alembic.script.ScriptDirectory.walk_revisions')
    def test_validate_revisions_fails_on_multiple_branch_points(
            self, walk_mock, validate_mock):
        revisions = [FakeRevision(is_branch_point=True) for i in range(2)]
        walk_mock.return_value = revisions
        self.assertRaises(
            SystemExit, cli.validate_revisions, self.configs[0])

    @mock.patch('alembic.script.ScriptDirectory.walk_revisions')
    def test__get_branch_points(self, walk_mock):
        revisions = [FakeRevision(is_branch_point=tools.get_random_boolean)
                     for i in range(50)]
        walk_mock.return_value = revisions
        script_dir = alembic_script.ScriptDirectory.from_config(
            self.configs[0])
        self.assertEqual(set(rev for rev in revisions if rev.is_branch_point),
                         set(cli._get_branch_points(script_dir)))

    @mock.patch.object(cli, '_get_version_branch_path')
    def test_autogen_process_directives(self, get_version_branch_path):

        get_version_branch_path.side_effect = lambda cfg, release, branch: (
            "/foo/expand" if branch == 'expand' else "/foo/contract")

        migration_script = alembic_ops.MigrationScript(
            'eced083f5df',
            # these directives will be split into separate
            # expand/contract scripts
            alembic_ops.UpgradeOps(
                ops=[
                    alembic_ops.CreateTableOp(
                        'organization',
                        [
                            sa.Column('id', sa.Integer(), primary_key=True),
                            sa.Column('name', sa.String(50), nullable=False)
                        ]
                    ),
                    alembic_ops.ModifyTableOps(
                        'user',
                        ops=[
                            alembic_ops.AddColumnOp(
                                'user',
                                sa.Column('organization_id', sa.Integer())
                            ),
                            alembic_ops.CreateForeignKeyOp(
                                'org_fk', 'user', 'organization',
                                ['organization_id'], ['id']
                            ),
                            alembic_ops.DropConstraintOp(
                                'user', 'uq_user_org'
                            ),
                            alembic_ops.DropColumnOp(
                                'user', 'organization_name'
                            )
                        ]
                    )
                ]
            ),
            # these will be discarded
            alembic_ops.DowngradeOps(
                ops=[
                    alembic_ops.AddColumnOp(
                        'user', sa.Column(
                            'organization_name', sa.String(50),
                            nullable=True)
                    ),
                    alembic_ops.CreateUniqueConstraintOp(
                        'uq_user_org', 'user',
                        ['user_name', 'organization_name']
                    ),
                    alembic_ops.ModifyTableOps(
                        'user',
                        ops=[
                            alembic_ops.DropConstraintOp('org_fk', 'user'),
                            alembic_ops.DropColumnOp('user',
                                                     'organization_id')
                        ]
                    ),
                    alembic_ops.DropTableOp('organization')
                ]
            ),
            message='create the organization table and '
                    'replace user.organization_name'
        )

        directives = [migration_script]
        autogen.process_revision_directives(
            mock.Mock(), mock.Mock(), directives
        )

        expand = directives[0]
        contract = directives[1]
        self.assertEqual("/foo/expand", expand.version_path)
        self.assertEqual("/foo/contract", contract.version_path)
        self.assertTrue(expand.downgrade_ops.is_empty())
        self.assertTrue(contract.downgrade_ops.is_empty())

        def _get_regex(s):
            s = textwrap.dedent(s)
            s = re.escape(s)
            # alembic 0.8.9 added additional leading '# ' before comments
            return s.replace('\\#\\#\\#\\ ', '(# )?### ')

        expected_regex = ("""\
        ### commands auto generated by Alembic - please adjust! ###
            op.create_table('organization',
            sa.Column('id', sa.Integer(), nullable=False),
            sa.Column('name', sa.String(length=50), nullable=False),
            sa.PrimaryKeyConstraint('id')
            )
            op.add_column('user', """
            """sa.Column('organization_id', sa.Integer(), nullable=True))
            op.create_foreign_key('org_fk', 'user', """
            """'organization', ['organization_id'], ['id'])
            ### end Alembic commands ###""")
        self.assertThat(
            alembic_ag_api.render_python_code(expand.upgrade_ops),
            matchers.MatchesRegex(_get_regex(expected_regex)))

        expected_regex = ("""\
        ### commands auto generated by Alembic - please adjust! ###
            op.drop_constraint('user', 'uq_user_org', type_=None)
            op.drop_column('user', 'organization_name')
            ### end Alembic commands ###""")
        self.assertThat(
            alembic_ag_api.render_python_code(contract.upgrade_ops),
            matchers.MatchesRegex(_get_regex(expected_regex)))

    @mock.patch('alembic.script.ScriptDirectory.walk_revisions')
    def test__find_milestone_revisions_one_branch(self, walk_mock):
        c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in range(5)]
        c_revs[1].module.neutron_milestone = [migration.LIBERTY]

        walk_mock.return_value = c_revs
        m = cli._find_milestone_revisions(self.configs[0], 'liberty',
                                          cli.CONTRACT_BRANCH)
        self.assertEqual(1, len(m))

        m = cli._find_milestone_revisions(self.configs[0], 'liberty',
                                          cli.EXPAND_BRANCH)
        self.assertEqual(0, len(m))

    @mock.patch('alembic.script.ScriptDirectory.walk_revisions')
    def test__find_milestone_revisions_two_branches(self, walk_mock):
        c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in range(5)]
        c_revs[1].module.neutron_milestone = [migration.LIBERTY]
        e_revs = [FakeRevision(labels={cli.EXPAND_BRANCH}) for r in range(5)]
        e_revs[3].module.neutron_milestone = [migration.LIBERTY]

        walk_mock.return_value = c_revs + e_revs
        m = cli._find_milestone_revisions(self.configs[0], 'liberty')
        self.assertEqual(2, len(m))

        m = cli._find_milestone_revisions(self.configs[0], 'mitaka')
        self.assertEqual(0, len(m))

    @mock.patch('alembic.script.ScriptDirectory.walk_revisions')
    def test__find_milestone_revisions_branchless(self, walk_mock):
        revisions = [FakeRevision() for r in range(5)]
        revisions[2].module.neutron_milestone = [migration.LIBERTY]

        walk_mock.return_value = revisions
        m = cli._find_milestone_revisions(self.configs[0], 'liberty')
        self.assertEqual(1, len(m))

        m = cli._find_milestone_revisions(self.configs[0], 'mitaka')
        self.assertEqual(0, len(m))


class TestSafetyChecks(base.BaseTestCase):
testlib_api.ExpectedException(SystemExit): self._main_test_helper( ['prog',", "*mocks): heads = ['a', 'b'] mock_open_con = self.useFixture( tools.OpenFixture(cli._get_contract_head_file_path( self.configs[0]))).mock_open", "test_upgrade_revision_delta(self): self._main_test_helper( ['prog', 'upgrade', 'kilo', '--delta', '3'], 'upgrade', [{'desc': None,", "'label2'} fake_revision = FakeRevision(labels) cli._compare_labels(fake_revision, {'label1', 'label2'}) def test__compare_labels_fail_unexpected_labels(self): labels", "# under the License. import copy import os import re", "= cli._get_head_file_path( self.configs[0]) old_heads_file = cli._get_heads_file_path( self.configs[0]) delete_if_exists = mocks[0]", ") @mock.patch.object(cli, '_validate_single_revision_labels') def test__validate_revision_validates_branches(self, validate_mock): script_dir = mock.Mock() fake_revision", "cmd, [{'verbose': True}]) def test_branches(self): self._validate_cmd('branches') def test_current(self): self._validate_cmd('current') def", "'organization_id') ] ), alembic_ops.DropTableOp('organization') ] ), message='create the organization table", "= set() compare_mock.assert_has_calls( [mock.call(revision, expected_labels) for revision in (fake_revision, fake_down_revision)]", "mode], 'upgrade') def _test_revision_autogenerate_conflicts_with_branch(self, branch): with testlib_api.ExpectedException(SystemExit): self._main_test_helper( ['prog', 'revision',", "'liberty') self.assertEqual(1, len(m)) m = cli._find_milestone_revisions(self.configs[0], 'mitaka') self.assertEqual(0, len(m)) class", ") @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test_upgrade_milestone_expand_before_contract(self, walk_mock): c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r", "= cli._find_milestone_revisions(self.configs[0], 'liberty', cli.CONTRACT_BRANCH) self.assertEqual(1, len(m)) m = cli._find_milestone_revisions(self.configs[0], 'liberty',", "OF ANY KIND, either express or implied. See the #", "project.replace('-', '_') + '.db.migration' attrs = ('alembic_migrations',) script_location = ':'.join([module_name,", "import mock from neutron_lib.utils import helpers from oslo_utils import fileutils", "by Alembic - please adjust! 
### op.create_table('organization', sa.Column('id', sa.Integer(), nullable=False),", "func_name, **_append_version_path(kwargs)) for kwargs in exp_kwargs] ) def test_stamp(self): self._main_test_helper(", "neutron.db.migration import cli from neutron.tests import base from neutron.tests import", "{} for proj, ep in migration_cli.migration_entrypoints.items(): self.ep_backup[proj] = copy.copy(ep) self.addCleanup(self.restore)", "= revisions self.assertRaises( SystemExit, cli.validate_revisions, self.configs[0]) @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test__get_branch_points(self, walk_mock):", "in writing, software # distributed under the License is distributed", "'check_migration'], 'branches') self.assertEqual(len(self.projects), validate.call_count) def _test_database_sync_revision(self, separate_branches=True): with mock.patch.object(cli, 'update_head_files')", "fileutils import pkg_resources import sqlalchemy as sa from testtools import", "_get_regex(s): s = textwrap.dedent(s) s = re.escape(s) # alembic 0.8.9", "SystemExit, cli._compare_labels, fake_revision, {'label1', 'label2'}) @mock.patch.object(cli, '_compare_labels') def test__validate_single_revision_labels_branchless_fail_different_labels( self,", "tests to chew on self.configs = [] self.projects = ('neutron',", "nullable=True) ), alembic_ops.CreateUniqueConstraintOp( 'uq_user_org', 'user', ['user_name', 'organization_name'] ), alembic_ops.ModifyTableOps( 'user',", "cli from neutron.tests import base from neutron.tests import tools from", "def test__find_milestone_revisions_one_branch(self, walk_mock): c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in range(5)]", "self.mock_alembic_is_offline.return_value = False self.mock_sa_inspector = mock.patch( 'sqlalchemy.engine.reflection.Inspector').start() def _prepare_mocked_sqlalchemy_inspector(self): mock_inspector", "= FakeRevision(labels) cli._compare_labels(fake_revision, {'label1', 'label2'}) def test__compare_labels_fail_unexpected_labels(self): labels = {'label1',", "'liberty', cli.CONTRACT_BRANCH) self.assertEqual(1, len(m)) m = cli._find_milestone_revisions(self.configs[0], 'liberty', cli.EXPAND_BRANCH) self.assertEqual(0,", "with testlib_api.ExpectedException(SystemExit): self._main_test_helper( ['prog', 'upgrade', '--%s revision1' % mode], 'upgrade')", "'3'], 'upgrade', [{'desc': None, 'revision': 'kilo+3', 'sql': False}] ) def", "= mock.MagicMock() mock_inspector.get_table_names.return_value = ['foo', 'bar'] mock_inspector.get_columns.return_value = [{'name': 'foo_column'},", "'\\n') mock_open_ex.return_value.write.assert_called_with(heads[1] + '\\n') old_head_file = cli._get_head_file_path( self.configs[0]) old_heads_file =", "[] self.projects = ('neutron', 'networking-foo', 'neutron-fwaas') ini = os.path.join(os.path.dirname(cli.__file__), 'alembic.ini')", "### end Alembic commands ###\"\"\") self.assertThat( alembic_ag_api.render_python_code(contract.upgrade_ops), matchers.MatchesRegex(_get_regex(expected_regex))) @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def", "cli._get_expand_head_file_path(fake_config)) mock_open_con.assert_called_with( cli._get_contract_head_file_path(fake_config)) if not head_files_not_exist: fc.assert_called_once_with(fake_config) def test_validate_head_files_success(self): self._test_validate_head_files_helper(['a',", "[{'desc': cli.CONTRACT_BRANCH, 'revision': 'contract@head', 'sql': False}] ) @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def 
test_upgrade_milestone_expand_before_contract(self,", "[{'revision': 'foo', 'sql': True}] ) def _validate_cmd(self, cmd): self._main_test_helper( ['prog',", "FakeRevision(labels=cli.CONTRACT_BRANCH), heads[1]: FakeRevision(labels=cli.EXPAND_BRANCH)} fc.return_value.get_revision.side_effect = revs.__getitem__ mock_open_con = self.useFixture( tools.OpenFixture(cli._get_contract_head_file_path(", "with the License. You may obtain # a copy of", "in expected_kwargs: kwarg['head'] = 'contract@head' self._main_test_helper( ['prog', 'revision', '-m', 'message',", "'-2']) def test_upgrade_negative_delta_fails(self): self.assert_command_fails(['prog', 'upgrade', '--delta', '-2']) def test_upgrade_rejects_delta_with_relative_revision(self): self.assert_command_fails(['prog',", "'--%s revision1' % mode], 'upgrade') def _test_upgrade_conflicts_with_delta(self, mode): with testlib_api.ExpectedException(SystemExit):", "governing permissions and limitations # under the License. import copy", "'label2'}) def test__compare_labels_fail_unexpected_labels(self): labels = {'label1', 'label2', 'label3'} fake_revision =", "'revision': 'expand@head', 'sql': False}] ) def test_upgrade_expand_contract_are_mutually_exclusive(self): with testlib_api.ExpectedException(SystemExit): self._main_test_helper(", "'_validate_single_revision_labels') def test__validate_revision_validates_branchless_migrations( self, validate_mock): script_dir = mock.Mock() fake_revision =", "_test_upgrade_conflicts_with_revision(self, mode): with testlib_api.ExpectedException(SystemExit): self._main_test_helper( ['prog', 'upgrade', '--%s revision1' %", ") self.assertEqual(len(self.projects), update.call_count) def test_database_sync_revision(self): self._test_database_sync_revision() def test_database_sync_revision_no_branches(self): # Test", "Alembic commands ###\"\"\") self.assertThat( alembic_ag_api.render_python_code(contract.upgrade_ops), matchers.MatchesRegex(_get_regex(expected_regex))) @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test__find_milestone_revisions_one_branch(self, walk_mock):", "mock.patch('alembic.util.warn').start() self.mock_alembic_err.side_effect = SystemExit def mocked_root_dir(cfg): return os.path.join('/fake/dir', cli._get_project_base(cfg)) mock_root", "alembic_ops.UpgradeOps( ops=[ alembic_ops.CreateTableOp( 'organization', [ sa.Column('id', sa.Integer(), primary_key=True), sa.Column('name', sa.String(50),", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "test_schema_has_table(self): self._prepare_mocked_sqlalchemy_inspector() self.assertTrue(migration.schema_has_table('foo')) def test_schema_has_table_raises_if_offline(self): self.mock_alembic_is_offline.return_value = True self.assertRaises(RuntimeError, migration.schema_has_table,", ") def test_upgrade_expand(self): self._main_test_helper( ['prog', 'upgrade', '--expand'], 'upgrade', [{'desc': cli.EXPAND_BRANCH,", "FakeRevision(labels) cli._compare_labels(fake_revision, {'label1', 'label2'}) def test__compare_labels_fail_unexpected_labels(self): labels = {'label1', 'label2',", "sa.String(50), nullable=False) ] ), alembic_ops.ModifyTableOps( 'user', ops=[ alembic_ops.AddColumnOp( 'user', sa.Column('organization_id',", "from neutron.db.migration import autogen from neutron.db.migration import cli from neutron.tests", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "'revision', '--autogenerate', '--%s' % branch], 'revision') def 
test_revision_autogenerate_conflicts_with_expand(self): self._test_revision_autogenerate_conflicts_with_branch( cli.EXPAND_BRANCH)", "alembic_ag_api.render_python_code(expand.upgrade_ops), matchers.MatchesRegex(_get_regex(expected_regex))) expected_regex = (\"\"\"\\ ### commands auto generated by", "so it can be restored during test cleanup. ''' def", "test__validate_single_revision_labels_branchless_fail_different_labels( self, compare_mock): fake_down_revision = FakeRevision() fake_revision = FakeRevision(down_revision=fake_down_revision) script_dir", "import migration from neutron.db.migration import autogen from neutron.db.migration import cli", "Avoid cluttering stdout with argparse error messages mock.patch('argparse.ArgumentParser._print_message').start() with mock.patch.object(sys,", "if branch == 'expand' else \"/foo/contract\") migration_script = alembic_ops.MigrationScript( 'eced083f5df',", "except in compliance with the License. You may obtain #", "contract = directives[1] self.assertEqual(\"/foo/expand\", expand.version_path) self.assertEqual(\"/foo/contract\", contract.version_path) self.assertTrue(expand.downgrade_ops.is_empty()) self.assertTrue(contract.downgrade_ops.is_empty()) def", "{heads[0]: FakeRevision(labels=cli.CONTRACT_BRANCH), heads[1]: FakeRevision(labels=cli.EXPAND_BRANCH)} fc.return_value.get_revision.side_effect = revs.__getitem__ mock_open_con = self.useFixture(", "cli.update_head_files(self.configs[0]) mock_open_con.return_value.write.assert_called_with( heads[0] + '\\n') mock_open_ex.return_value.write.assert_called_with(heads[1] + '\\n') old_head_file =", "sa.Integer(), nullable=True)) op.create_foreign_key('org_fk', 'user', \"\"\" \"\"\"'organization', ['organization_id'], ['id']) ### end", "'user', \"\"\" \"\"\"'organization', ['organization_id'], ['id']) ### end Alembic commands ###\"\"\")", "if contract_head in heads and expand_head in heads: cli.validate_head_files(fake_config) elif", "= FakeRevision() fake_revision = FakeRevision(down_revision=fake_down_revision) script_dir = mock.Mock() script_dir.get_revision.return_value =", "# distributed under the License is distributed on an \"AS", "# Unless required by applicable law or agreed to in", "revision in (fake_revision, fake_down_revision)] ) @mock.patch.object(cli, '_validate_single_revision_labels') def test__validate_revision_validates_branches(self, validate_mock):", "is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES", "self._validate_cmd('history') def test_heads(self): self._validate_cmd('heads') def test_check_migration(self): with mock.patch.object(cli, 'validate_head_files') as", "_test_revision_autogenerate_conflicts_with_branch(self, branch): with testlib_api.ExpectedException(SystemExit): self._main_test_helper( ['prog', 'revision', '--autogenerate', '--%s' %", "limitations # under the License. import copy import os import", "messages mock.patch('argparse.ArgumentParser._print_message').start() with mock.patch.object(sys, 'argv', command), mock.patch.object( cli, 'run_sanity_checks'): self.assertRaises(SystemExit,", "= cli.MIGRATION_BRANCHES[0] fake_revision.path = os.path.join('/fake/path', branch) cli._validate_revision(script_dir, fake_revision) validate_mock.assert_called_with( script_dir,", "expand_head in heads: cli.validate_head_files(fake_config) elif head_files_not_exist: cli.validate_head_files(fake_config) self.assertTrue(self.mock_alembic_warn.called) else: self.assertRaises(", "during test cleanup. 
''' def _setUp(self): self.ep_backup = {} for", "= [migration.LIBERTY] walk_mock.return_value = c_revs m = cli._find_milestone_revisions(self.configs[0], 'liberty', cli.CONTRACT_BRANCH)", "False self.mock_sa_inspector = mock.patch( 'sqlalchemy.engine.reflection.Inspector').start() def _prepare_mocked_sqlalchemy_inspector(self): mock_inspector = mock.MagicMock()", "revisions = [FakeRevision() for i in range(10)] walk_mock.return_value = revisions", "'-m', 'message'], 'revision', expected_kwargs ) self.assertEqual(len(self.projects), update.call_count) update.reset_mock() expected_kwargs =", "e_revs[3].module.neutron_milestone = [migration.LIBERTY] walk_mock.return_value = c_revs + e_revs m =", "for revision in revisions] ) @mock.patch.object(cli, '_validate_revision') @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test_validate_revisions_fails_on_multiple_branch_points(", "autogen.process_revision_directives( mock.Mock(), mock.Mock(), directives ) expand = directives[0] contract =", "argparse error messages mock.patch('argparse.ArgumentParser._print_message').start() with mock.patch.object(sys, 'argv', command), mock.patch.object( cli,", "in heads and expand_head in heads: cli.validate_head_files(fake_config) elif head_files_not_exist: cli.validate_head_files(fake_config)", "len(m)) m = cli._find_milestone_revisions(self.configs[0], 'mitaka') self.assertEqual(0, len(m)) @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test__find_milestone_revisions_branchless(self,", "expected_kwargs = [{ 'message': 'message', 'sql': False, 'autogenerate': False, 'head':", "'neutron', 'upgrade', 'liberty'], 'upgrade', [{'desc': cli.EXPAND_BRANCH, 'revision': e_revs[3].revision, 'sql': False},", "= False else: os_mock.return_value = True fc.return_value.get_heads.return_value = heads revs", "_main_test_helper(self, argv, func_name, exp_kwargs=[{}]): with mock.patch.object(sys, 'argv', argv),\\ mock.patch.object(cli, 'run_sanity_checks'),\\", "mock.patch.object(cli, 'run_sanity_checks'),\\ mock.patch.object(cli, 'validate_revisions'): cli.main() def _append_version_path(args): args = copy.copy(args)", "update.call_count) update.reset_mock() expected_kwargs = [{ 'message': 'message', 'sql': True, 'autogenerate':", "self.projects = ('neutron', 'networking-foo', 'neutron-fwaas') ini = os.path.join(os.path.dirname(cli.__file__), 'alembic.ini') self.useFixture(MigrationEntrypointsMemento())", "[{'desc': None, 'revision': '+3', 'sql': False}] ) def test_upgrade_revision_delta(self): self._main_test_helper(", "== '') with mock.patch('alembic.script.ScriptDirectory.from_config') as fc,\\ mock.patch('os.path.exists') as os_mock: if", "fc.return_value.get_heads.return_value = heads revs = {heads[0]: FakeRevision(labels=cli.CONTRACT_BRANCH), heads[1]: FakeRevision(labels=cli.EXPAND_BRANCH)} fc.return_value.get_revision.side_effect", "separate_branches=True): with mock.patch.object(cli, 'update_head_files') as update: if separate_branches: mock.patch('os.path.exists').start() expected_kwargs", "'_get_package_root_dir').start() mock_root.side_effect = mocked_root_dir # Avoid creating fake directories mock.patch('oslo_utils.fileutils.ensure_tree').start()", "# Avoid creating fake directories mock.patch('oslo_utils.fileutils.ensure_tree').start() # Set up some", "'# ' before comments return s.replace('\\\\#\\\\#\\\\#\\\\ ', '(# )?### ')", "self.mock_alembic_err = mock.patch('alembic.util.err').start() self.mock_alembic_warn = mock.patch('alembic.util.warn').start() 
self.mock_alembic_err.side_effect = SystemExit def", "def test_upgrade_milestone_expand_before_contract(self, walk_mock): c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in range(5)]", "specific language governing permissions and limitations # under the License.", "# not use this file except in compliance with the", "r in range(5)] revisions[2].module.neutron_milestone = [migration.LIBERTY] walk_mock.return_value = revisions m", "= {} for proj, ep in migration_cli.migration_entrypoints.items(): self.ep_backup[proj] = copy.copy(ep)", "cfg, release, branch: ( \"/foo/expand\" if branch == 'expand' else", "rev in revisions if rev.is_branch_point), set(cli._get_branch_points(script_dir))) @mock.patch.object(cli, '_get_version_branch_path') def test_autogen_process_directives(self,", "neutron.conf.db import migration_cli from neutron.db import migration from neutron.db.migration import", "**_append_version_path(kwargs)) for kwargs in exp_kwargs] ) def test_stamp(self): self._main_test_helper( ['prog',", "oslo_utils import fileutils import pkg_resources import sqlalchemy as sa from", "validate_mock.assert_called_with(script_dir, fake_revision) @mock.patch.object(cli, '_validate_revision') @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test_validate_revisions_walks_thru_all_revisions( self, walk_mock, validate_mock):", "separate # expand/contract scripts alembic_ops.UpgradeOps( ops=[ alembic_ops.CreateTableOp( 'organization', [ sa.Column('id',", "old branchless approach is still supported self._test_database_sync_revision(separate_branches=False) def test_upgrade_revision(self): self._main_test_helper(", "'delete_if_exists') def test_update_head_files_success(self, *mocks): heads = ['a', 'b'] mock_open_con =", "expected_kwargs = [{ 'message': 'message', 'sql': True, 'autogenerate': False, 'head':", "sa.Column('id', sa.Integer(), primary_key=True), sa.Column('name', sa.String(50), nullable=False) ] ), alembic_ops.ModifyTableOps( 'user',", "to chew on self.configs = [] self.projects = ('neutron', 'networking-foo',", "mock.Mock() fake_revision = FakeRevision() cli._validate_revision(script_dir, fake_revision) validate_mock.assert_called_with(script_dir, fake_revision) @mock.patch.object(cli, '_validate_revision')", "in range(5)] revisions[2].module.neutron_milestone = [migration.LIBERTY] walk_mock.return_value = revisions m =", "= mock.MagicMock() class MigrationEntrypointsMemento(fixtures.Fixture): '''Create a copy of the migration", "fake_down_revision cli._validate_single_revision_labels(script_dir, fake_revision, label=None) expected_labels = set() compare_mock.assert_has_calls( [mock.call(revision, expected_labels)", "fc,\\ mock.patch('os.path.exists') as os_mock: if head_files_not_exist: os_mock.return_value = False else:", "migration from neutron.db.migration import autogen from neutron.db.migration import cli from", "'user', ops=[ alembic_ops.DropConstraintOp('org_fk', 'user'), alembic_ops.DropColumnOp('user', 'organization_id') ] ), alembic_ops.DropTableOp('organization') ]", "contract_head='', expand_head=''): fake_config = self.configs[0] head_files_not_exist = (contract_head == expand_head", "if separate_branches: mock.patch('os.path.exists').start() expected_kwargs = [{ 'message': 'message', 'sql': False,", "import re import sys import textwrap from alembic.autogenerate import api", "commands auto generated by Alembic - please adjust! 
### op.drop_constraint('user',", "def test_upgrade_contract_conflicts_with_revision(self): self._test_upgrade_conflicts_with_revision('contract') def test_upgrade_expand_conflicts_with_delta(self): self._test_upgrade_conflicts_with_delta('expand') def test_upgrade_contract_conflicts_with_delta(self): self._test_upgrade_conflicts_with_delta('contract') def", "script_dir, fake_revision, label='fakebranch') expected_labels = {'fakebranch'} compare_mock.assert_has_calls( [mock.call(revision, expected_labels) for", "mock_inspector.get_columns.return_value = [{'name': 'foo_column'}, {'name': 'bar_column'}] self.mock_sa_inspector.from_engine.return_value = mock_inspector def", "def test_revision_autogenerate_conflicts_with_expand(self): self._test_revision_autogenerate_conflicts_with_branch( cli.EXPAND_BRANCH) def test_revision_autogenerate_conflicts_with_contract(self): self._test_revision_autogenerate_conflicts_with_branch( cli.CONTRACT_BRANCH) def test_upgrade_expand_conflicts_with_revision(self):", "fake_revision) @mock.patch.object(cli, '_validate_revision') @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test_validate_revisions_walks_thru_all_revisions( self, walk_mock, validate_mock): revisions", "self._main_test_helper( ['prog', 'stamp', 'foo'], 'stamp', [{'revision': 'foo', 'sql': False}] )", "self.assertEqual(len(self.projects), update.call_count) def test_database_sync_revision(self): self._test_database_sync_revision() def test_database_sync_revision_no_branches(self): # Test that", "under the License is distributed on an \"AS IS\" BASIS,", "FakeRevision() cli._validate_revision(script_dir, fake_revision) validate_mock.assert_called_with(script_dir, fake_revision) @mock.patch.object(cli, '_validate_revision') @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test_validate_revisions_walks_thru_all_revisions(", "cli._get_root_versions_dir(config) self.assertEqual('/fake/dir/a/a/b/c/d/versions', versions_dir) def test_get_subproject_script_location(self): foo_ep = cli._get_subproject_script_location('networking-foo') expected =", "'upgrade', 'liberty'], 'upgrade', [{'desc': cli.EXPAND_BRANCH, 'revision': e_revs[3].revision, 'sql': False}, {'desc':", "leading '# ' before comments return s.replace('\\\\#\\\\#\\\\#\\\\ ', '(# )?###", "'a.b.c:d') versions_dir = cli._get_root_versions_dir(config) self.assertEqual('/fake/dir/a/a/b/c/d/versions', versions_dir) def test_get_subproject_script_location(self): foo_ep =", "def test_upgrade_delta(self): self._main_test_helper( ['prog', 'upgrade', '--delta', '3'], 'upgrade', [{'desc': None,", "= FakeRevision() branch = cli.MIGRATION_BRANCHES[0] fake_revision.path = os.path.join('/fake/path', branch) cli._validate_revision(script_dir,", "'sql': True, 'autogenerate': False, 'head': cli._get_branch_head(branch) } for branch in", "script_dir = mock.Mock() fake_revision = FakeRevision() branch = cli.MIGRATION_BRANCHES[0] fake_revision.path", "= ('alembic_migrations',) script_location = ':'.join([module_name, attrs[0]]) config.set_main_option('script_location', script_location) self.configs.append(config) entrypoint", "migration_cli.migration_entrypoints.items(): self.ep_backup[proj] = copy.copy(ep) self.addCleanup(self.restore) def restore(self): migration_cli.migration_entrypoints = self.ep_backup", "test__find_milestone_revisions_two_branches(self, walk_mock): c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in range(5)] c_revs[1].module.neutron_milestone", "this file except in compliance with the License. 
You may", "as os_mock: if head_files_not_exist: os_mock.return_value = False else: os_mock.return_value =", "from testtools import matchers from neutron.conf.db import migration_cli from neutron.db", "mock.patch.object(cli, 'validate_revisions'): cli.main() def _append_version_path(args): args = copy.copy(args) if 'autogenerate'", "'upgrade', [{'desc': None, 'revision': 'kilo+3', 'sql': False}] ) def test_upgrade_expand(self):", "import sys import textwrap from alembic.autogenerate import api as alembic_ag_api", "heads and expand_head in heads: mock_open_ex.assert_called_with( cli._get_expand_head_file_path(fake_config)) mock_open_con.assert_called_with( cli._get_contract_head_file_path(fake_config)) if", "r in range(5)] c_revs[1].module.neutron_milestone = [migration.LIBERTY] e_revs = [FakeRevision(labels={cli.EXPAND_BRANCH}) for", "test_schema_has_column(self): self._prepare_mocked_sqlalchemy_inspector() self.assertTrue(migration.schema_has_column('foo', 'foo_column')) def test_schema_has_column_raises_if_offline(self): self.mock_alembic_is_offline.return_value = True self.assertRaises(RuntimeError,", "as alembic_ops from alembic import script as alembic_script import fixtures", "self.assertIn(mock.call(old_head_file), delete_if_exists.call_args_list) self.assertIn(mock.call(old_heads_file), delete_if_exists.call_args_list) def test_get_project_base(self): config = alembic_config.Config() config.set_main_option('script_location',", "FakeRevision() branch = cli.MIGRATION_BRANCHES[0] fake_revision.path = os.path.join('/fake/path', branch) cli._validate_revision(script_dir, fake_revision)", "def test_schema_has_column_missing_table(self): self._prepare_mocked_sqlalchemy_inspector() self.assertFalse(migration.schema_has_column('meh', 'meh')) def test_schema_has_column(self): self._prepare_mocked_sqlalchemy_inspector() self.assertTrue(migration.schema_has_column('foo', 'foo_column'))", "mock_root.side_effect = mocked_root_dir # Avoid creating fake directories mock.patch('oslo_utils.fileutils.ensure_tree').start() #", "revisions cli.validate_revisions(self.configs[0]) validate_mock.assert_has_calls( [mock.call(mock.ANY, revision) for revision in revisions] )", "os_mock.return_value = True fc.return_value.get_heads.return_value = heads revs = {heads[0]: FakeRevision(labels=cli.CONTRACT_BRANCH),", "[FakeRevision(is_branch_point=tools.get_random_boolean) for i in range(50)] walk_mock.return_value = revisions script_dir =", "= [FakeRevision(is_branch_point=tools.get_random_boolean) for i in range(50)] walk_mock.return_value = revisions script_dir", "copy.copy(args) if 'autogenerate' in args and not args['autogenerate']: args['version_path'] =", "foo_ep = cli._get_subproject_script_location('networking-foo') expected = 'networking_foo.db.migration:alembic_migrations' self.assertEqual(expected, foo_ep) def test_get_subproject_script_location_not_installed(self):", "def test__find_milestone_revisions_branchless(self, walk_mock): revisions = [FakeRevision() for r in range(5)]", "'user', 'organization_name' ) ] ) ] ), # these will", "file except in compliance with the License. 
You may obtain", "'_validate_single_revision_labels') def test__validate_revision_validates_branches(self, validate_mock): script_dir = mock.Mock() fake_revision = FakeRevision()", "alembic_ops.CreateUniqueConstraintOp( 'uq_user_org', 'user', ['user_name', 'organization_name'] ), alembic_ops.ModifyTableOps( 'user', ops=[ alembic_ops.DropConstraintOp('org_fk',", "'--sql'], 'stamp', [{'revision': 'foo', 'sql': True}] ) def _validate_cmd(self, cmd):", "script_dir, fake_revision, label=branch) @mock.patch.object(cli, '_validate_single_revision_labels') def test__validate_revision_validates_branchless_migrations( self, validate_mock): script_dir", "class TestDbMigration(base.BaseTestCase): def setUp(self): super(TestDbMigration, self).setUp() mock.patch('alembic.op.get_bind').start() self.mock_alembic_is_offline = mock.patch(", "'--delta', '3'], 'upgrade', [{'desc': None, 'revision': '+3', 'sql': False}] )", "= cli._get_project_base(config) self.assertEqual('a', proj_base) def test_get_root_versions_dir(self): config = alembic_config.Config() config.set_main_option('script_location',", "OR CONDITIONS OF ANY KIND, either express or implied. See", "update.reset_mock() expected_kwargs = [{ 'message': 'message', 'sql': False, 'autogenerate': False,", "= alembic_config.Config() config.set_main_option('script_location', 'a.b.c:d') proj_base = cli._get_project_base(config) self.assertEqual('a', proj_base) def", "in exp_kwargs] ) def test_stamp(self): self._main_test_helper( ['prog', 'stamp', 'foo'], 'stamp',", "[FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in range(5)] c_revs[1].module.neutron_milestone = [migration.LIBERTY] e_revs =", "kwarg['sql'] = True self._main_test_helper( ['prog', 'revision', '--sql', '-m', 'message'], 'revision',", "self.assertThat( alembic_ag_api.render_python_code(expand.upgrade_ops), matchers.MatchesRegex(_get_regex(expected_regex))) expected_regex = (\"\"\"\\ ### commands auto generated", "end Alembic commands ###\"\"\") self.assertThat( alembic_ag_api.render_python_code(expand.upgrade_ops), matchers.MatchesRegex(_get_regex(expected_regex))) expected_regex = (\"\"\"\\", "textwrap.dedent(s) s = re.escape(s) # alembic 0.8.9 added additional leading", "mock.patch('oslo_utils.fileutils.ensure_tree').start() # Set up some configs and entrypoints for tests", "expected_labels = set() compare_mock.assert_has_calls( [mock.call(revision, expected_labels) for revision in (fake_revision,", "alembic_ops.AddColumnOp( 'user', sa.Column( 'organization_name', sa.String(50), nullable=True) ), alembic_ops.CreateUniqueConstraintOp( 'uq_user_org', 'user',", "expected_labels) for revision in (fake_revision, fake_down_revision)] ) @mock.patch.object(cli, '_compare_labels') def", "m = cli._find_milestone_revisions(self.configs[0], 'liberty', cli.CONTRACT_BRANCH) self.assertEqual(1, len(m)) m = cli._find_milestone_revisions(self.configs[0],", "mock.patch.object( cli, 'run_sanity_checks'): self.assertRaises(SystemExit, cli.main) def test_downgrade_fails(self): self.assert_command_fails(['prog', 'downgrade', '--sql',", "self._test_validate_head_files_helper(['a', 'b'], contract_head='c', expand_head='d') @mock.patch.object(fileutils, 'delete_if_exists') def test_update_head_files_success(self, *mocks): heads", "fc.assert_called_once_with(fake_config) def test_validate_head_files_success(self): self._test_validate_head_files_helper(['a', 'b'], contract_head='a', expand_head='b') def test_validate_head_files_missing_file(self): self._test_validate_head_files_helper(['a',", 
"test_check_migration(self): with mock.patch.object(cli, 'validate_head_files') as validate: self._main_test_helper(['prog', 'check_migration'], 'branches') self.assertEqual(len(self.projects),", "mode], 'upgrade') def _test_upgrade_conflicts_with_delta(self, mode): with testlib_api.ExpectedException(SystemExit): self._main_test_helper( ['prog', 'upgrade',", "self).setUp() self.do_alembic_cmd_p = mock.patch.object(cli, 'do_alembic_command') self.do_alembic_cmd = self.do_alembic_cmd_p.start() self.mock_alembic_err =", "None, 'revision': '+3', 'sql': False}] ) def test_upgrade_revision_delta(self): self._main_test_helper( ['prog',", "self._main_test_helper( ['prog', '--subproject', 'neutron', 'upgrade', 'liberty'], 'upgrade', [{'desc': cli.EXPAND_BRANCH, 'revision':", "in range(5)] c_revs[1].module.neutron_milestone = [migration.LIBERTY] walk_mock.return_value = c_revs m =", "class FakeConfig(object): service = '' class FakeRevision(object): path = 'fakepath'", "project in self.projects: config = alembic_config.Config(ini) config.set_main_option('neutron_project', project) module_name =", "cli, 'run_sanity_checks'): self.assertRaises(SystemExit, cli.main) def test_downgrade_fails(self): self.assert_command_fails(['prog', 'downgrade', '--sql', 'juno'])", "branch) cli._validate_revision(script_dir, fake_revision) validate_mock.assert_called_with( script_dir, fake_revision, label=branch) @mock.patch.object(cli, '_validate_single_revision_labels') def", "'message', 'sql': False, 'autogenerate': False, 'head': 'expand@head' }] self._main_test_helper( ['prog',", "cli._get_project_base(cfg)) mock_root = mock.patch.object(cli, '_get_package_root_dir').start() mock_root.side_effect = mocked_root_dir # Avoid", "= lambda cfg, release, branch: ( \"/foo/expand\" if branch ==", "validate.call_count) def _test_database_sync_revision(self, separate_branches=True): with mock.patch.object(cli, 'update_head_files') as update: if", "if 'autogenerate' in args and not args['autogenerate']: args['version_path'] = mock.ANY", "is still supported self._test_database_sync_revision(separate_branches=False) def test_upgrade_revision(self): self._main_test_helper( ['prog', 'upgrade', '--sql',", "= mock.patch.object(cli, 'do_alembic_command') self.do_alembic_cmd = self.do_alembic_cmd_p.start() self.mock_alembic_err = mock.patch('alembic.util.err').start() self.mock_alembic_warn", "def test_upgrade_contract(self): self._main_test_helper( ['prog', 'upgrade', '--contract'], 'upgrade', [{'desc': cli.CONTRACT_BRANCH, 'revision':", "mock_open_ex.return_value.write.assert_called_with(heads[1] + '\\n') old_head_file = cli._get_head_file_path( self.configs[0]) old_heads_file = cli._get_heads_file_path(", "walk_mock.return_value = c_revs + e_revs self._main_test_helper( ['prog', '--subproject', 'neutron', 'upgrade',", "'upgrade') def _test_revision_autogenerate_conflicts_with_branch(self, branch): with testlib_api.ExpectedException(SystemExit): self._main_test_helper( ['prog', 'revision', '--autogenerate',", "'stamp', [{'revision': 'foo', 'sql': False}] ) self._main_test_helper( ['prog', 'stamp', 'foo',", "cli._validate_revision(script_dir, fake_revision) validate_mock.assert_called_with(script_dir, fake_revision) @mock.patch.object(cli, '_validate_revision') @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test_validate_revisions_walks_thru_all_revisions( self,", "'--sql', '-m', 'message'], 'revision', expected_kwargs ) self.assertEqual(len(self.projects), update.call_count) update.reset_mock() expected_kwargs", 
"'upgrade', '--delta', '3'], 'upgrade', [{'desc': None, 'revision': '+3', 'sql': False}]", "['prog', 'revision', '-m', 'message', '--contract'], 'revision', expected_kwargs ) self.assertEqual(len(self.projects), update.call_count)", ") @mock.patch.object(cli, '_validate_revision') @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test_validate_revisions_fails_on_multiple_branch_points( self, walk_mock, validate_mock): revisions", "nullable=False) ] ), alembic_ops.ModifyTableOps( 'user', ops=[ alembic_ops.AddColumnOp( 'user', sa.Column('organization_id', sa.Integer())", "alembic_ops from alembic import script as alembic_script import fixtures import", "False, 'head': cli._get_branch_head(branch) } for branch in cli.MIGRATION_BRANCHES] for kwarg", "in cli.MIGRATION_BRANCHES] for kwarg in expected_kwargs: kwarg['autogenerate'] = False kwarg['sql']", "validate_mock.assert_called_with( script_dir, fake_revision, label=branch) @mock.patch.object(cli, '_validate_single_revision_labels') def test__validate_revision_validates_branchless_migrations( self, validate_mock):", "is_branch_point self.revision = helpers.get_random_string(10) self.module = mock.MagicMock() class MigrationEntrypointsMemento(fixtures.Fixture): '''Create", "len(m)) m = cli._find_milestone_revisions(self.configs[0], 'mitaka') self.assertEqual(0, len(m)) class TestSafetyChecks(base.BaseTestCase): def", "in heads and expand_head in heads: mock_open_ex.assert_called_with( cli._get_expand_head_file_path(fake_config)) mock_open_con.assert_called_with( cli._get_contract_head_file_path(fake_config))", "LLC (DreamHost) # All Rights Reserved. # # Licensed under", "self.ep_backup[proj] = copy.copy(ep) self.addCleanup(self.restore) def restore(self): migration_cli.migration_entrypoints = self.ep_backup class", "'3']) def _test_validate_head_files_helper(self, heads, contract_head='', expand_head=''): fake_config = self.configs[0] head_files_not_exist", "tools.OpenFixture(cli._get_expand_head_file_path( self.configs[0]))).mock_open with mock.patch('alembic.script.ScriptDirectory.from_config') as fc: fc.return_value.get_heads.return_value = heads revs", "TestDbMigration(base.BaseTestCase): def setUp(self): super(TestDbMigration, self).setUp() mock.patch('alembic.op.get_bind').start() self.mock_alembic_is_offline = mock.patch( 'alembic.context.is_offline_mode',", "expected_kwargs: kwarg['head'] = 'contract@head' self._main_test_helper( ['prog', 'revision', '-m', 'message', '--contract'],", ") self.assertEqual(len(self.projects), update.call_count) update.reset_mock() for kwarg in expected_kwargs: kwarg['head'] =", "[FakeRevision() for r in range(5)] revisions[2].module.neutron_milestone = [migration.LIBERTY] walk_mock.return_value =", "writing, software # distributed under the License is distributed on", "False}] ) def test_upgrade_expand(self): self._main_test_helper( ['prog', 'upgrade', '--expand'], 'upgrade', [{'desc':", ") def assert_command_fails(self, command): # Avoid cluttering stdout with argparse", "kwarg['autogenerate'] = False kwarg['sql'] = True self._main_test_helper( ['prog', 'revision', '--sql',", "test_stamp(self): self._main_test_helper( ['prog', 'stamp', 'foo'], 'stamp', [{'revision': 'foo', 'sql': False}]", "base from neutron.tests import tools from neutron.tests.unit import testlib_api class", "mock.patch('alembic.op.get_bind').start() self.mock_alembic_is_offline = mock.patch( 'alembic.context.is_offline_mode', return_value=False).start() self.mock_alembic_is_offline.return_value = False 
self.mock_sa_inspector", "the License. You may obtain # a copy of the", "use this file except in compliance with the License. You", "'--expand --contract'], 'upgrade') def _test_upgrade_conflicts_with_revision(self, mode): with testlib_api.ExpectedException(SystemExit): self._main_test_helper( ['prog',", "= os.path.join(os.path.dirname(cli.__file__), 'alembic.ini') self.useFixture(MigrationEntrypointsMemento()) migration_cli.migration_entrypoints = {} for project in", "os.path.join(os.path.dirname(cli.__file__), 'alembic.ini') self.useFixture(MigrationEntrypointsMemento()) migration_cli.migration_entrypoints = {} for project in self.projects:", "supported self._test_database_sync_revision(separate_branches=False) def test_upgrade_revision(self): self._main_test_helper( ['prog', 'upgrade', '--sql', 'head'], 'upgrade',", "= True self._main_test_helper( ['prog', 'revision', '--sql', '-m', 'message'], 'revision', expected_kwargs", "MigrationEntrypointsMemento(fixtures.Fixture): '''Create a copy of the migration entrypoints map so", "script_dir.get_revision.return_value = fake_down_revision cli._validate_single_revision_labels(script_dir, fake_revision, label=None) expected_labels = set() compare_mock.assert_has_calls(", "= False self.mock_sa_inspector = mock.patch( 'sqlalchemy.engine.reflection.Inspector').start() def _prepare_mocked_sqlalchemy_inspector(self): mock_inspector =", "Set up some configs and entrypoints for tests to chew", "labels=None, down_revision=None, is_branch_point=False): if not labels: labels = set() self.branch_labels", "cli._find_milestone_revisions(self.configs[0], 'liberty') self.assertEqual(2, len(m)) m = cli._find_milestone_revisions(self.configs[0], 'mitaka') self.assertEqual(0, len(m))", "script_dir.get_revision.return_value = fake_down_revision cli._validate_single_revision_labels( script_dir, fake_revision, label='fakebranch') expected_labels = {'fakebranch'}", "attrs[0]]) config.set_main_option('script_location', script_location) self.configs.append(config) entrypoint = pkg_resources.EntryPoint(project, module_name, attrs=attrs) migration_cli.migration_entrypoints[project]", "= [FakeRevision(labels={cli.EXPAND_BRANCH}) for r in range(5)] e_revs[3].module.neutron_milestone = [migration.LIBERTY] walk_mock.return_value", "express or implied. 
See the # License for the specific", "self._main_test_helper( ['prog', 'upgrade', '--delta', '3'], 'upgrade', [{'desc': None, 'revision': '+3',", "# alembic 0.8.9 added additional leading '# ' before comments", "the Apache License, Version 2.0 (the \"License\"); you may #", "@mock.patch.object(fileutils, 'delete_if_exists') def test_update_head_files_success(self, *mocks): heads = ['a', 'b'] mock_open_con", "cli._get_subproject_base, 'not-installed') def test__compare_labels_ok(self): labels = {'label1', 'label2'} fake_revision =", "not head_files_not_exist: fc.assert_called_once_with(fake_config) def test_validate_head_files_success(self): self._test_validate_head_files_helper(['a', 'b'], contract_head='a', expand_head='b') def", "for r in range(5)] c_revs[1].module.neutron_milestone = [migration.LIBERTY] walk_mock.return_value = c_revs", "= cli._find_milestone_revisions(self.configs[0], 'mitaka') self.assertEqual(0, len(m)) class TestSafetyChecks(base.BaseTestCase): def test_validate_revisions(self, *mocks):", "matchers.MatchesRegex(_get_regex(expected_regex))) expected_regex = (\"\"\"\\ ### commands auto generated by Alembic", "heads: mock_open_ex.assert_called_with( cli._get_expand_head_file_path(fake_config)) mock_open_con.assert_called_with( cli._get_contract_head_file_path(fake_config)) if not head_files_not_exist: fc.assert_called_once_with(fake_config) def", "self.assertEqual(1, len(m)) m = cli._find_milestone_revisions(self.configs[0], 'liberty', cli.EXPAND_BRANCH) self.assertEqual(0, len(m)) @mock.patch('alembic.script.ScriptDirectory.walk_revisions')", ") def test_upgrade_delta(self): self._main_test_helper( ['prog', 'upgrade', '--delta', '3'], 'upgrade', [{'desc':", "self._main_test_helper( ['prog', 'upgrade', '--%s +3' % mode], 'upgrade') def _test_revision_autogenerate_conflicts_with_branch(self,", "not labels: labels = set() self.branch_labels = labels self.down_revision =", "'foo_column'}, {'name': 'bar_column'}] self.mock_sa_inspector.from_engine.return_value = mock_inspector def test_schema_has_table(self): self._prepare_mocked_sqlalchemy_inspector() self.assertTrue(migration.schema_has_table('foo'))", "['prog', 'stamp', 'foo', '--sql'], 'stamp', [{'revision': 'foo', 'sql': True}] )", "= alembic_script.ScriptDirectory.from_config( self.configs[0]) self.assertEqual(set(rev for rev in revisions if rev.is_branch_point),", "not args['autogenerate']: args['version_path'] = mock.ANY return args self.do_alembic_cmd.assert_has_calls( [mock.call(mock.ANY, func_name,", "@mock.patch.object(cli, '_get_version_branch_path') def test_autogen_process_directives(self, get_version_branch_path): get_version_branch_path.side_effect = lambda cfg, release,", "'upgrade', [{'desc': None, 'revision': '+3', 'sql': False}] ) def test_upgrade_revision_delta(self):", "self.assertRaises(RuntimeError, migration.schema_has_column, 'foo', 'foo_col') def test_schema_has_column_missing_column(self): self._prepare_mocked_sqlalchemy_inspector() self.assertFalse(migration.schema_has_column( 'foo', column_name='meh'))", "for i in range(2)] walk_mock.return_value = revisions self.assertRaises( SystemExit, cli.validate_revisions,", "branch): with testlib_api.ExpectedException(SystemExit): self._main_test_helper( ['prog', 'revision', '--autogenerate', '--%s' % branch],", "= (\"\"\"\\ ### commands auto generated by Alembic - please", "\"\"\" \"\"\"'organization', ['organization_id'], ['id']) ### end Alembic commands ###\"\"\") self.assertThat(", "get_version_branch_path): get_version_branch_path.side_effect = 
lambda cfg, release, branch: ( \"/foo/expand\" if", "['user_name', 'organization_name'] ), alembic_ops.ModifyTableOps( 'user', ops=[ alembic_ops.DropConstraintOp('org_fk', 'user'), alembic_ops.DropColumnOp('user', 'organization_id')", "= directives[0] contract = directives[1] self.assertEqual(\"/foo/expand\", expand.version_path) self.assertEqual(\"/foo/contract\", contract.version_path) self.assertTrue(expand.downgrade_ops.is_empty())", "SystemExit, cli._get_subproject_base, 'not-installed') def test__compare_labels_ok(self): labels = {'label1', 'label2'} fake_revision", "'alembic.context.is_offline_mode', return_value=False).start() self.mock_alembic_is_offline.return_value = False self.mock_sa_inspector = mock.patch( 'sqlalchemy.engine.reflection.Inspector').start() def", "these will be discarded alembic_ops.DowngradeOps( ops=[ alembic_ops.AddColumnOp( 'user', sa.Column( 'organization_name',", "config as alembic_config from alembic.operations import ops as alembic_ops from", "'foo', column_name='meh')) class TestCli(base.BaseTestCase): def setUp(self): super(TestCli, self).setUp() self.do_alembic_cmd_p =", "args['version_path'] = mock.ANY return args self.do_alembic_cmd.assert_has_calls( [mock.call(mock.ANY, func_name, **_append_version_path(kwargs)) for", "['prog', 'revision', '--autogenerate', '--%s' % branch], 'revision') def test_revision_autogenerate_conflicts_with_expand(self): self._test_revision_autogenerate_conflicts_with_branch(", "'user', ops=[ alembic_ops.AddColumnOp( 'user', sa.Column('organization_id', sa.Integer()) ), alembic_ops.CreateForeignKeyOp( 'org_fk', 'user',", "self.assertEqual(2, len(m)) m = cli._find_milestone_revisions(self.configs[0], 'mitaka') self.assertEqual(0, len(m)) @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def", "self.configs[0]) delete_if_exists = mocks[0] self.assertIn(mock.call(old_head_file), delete_if_exists.call_args_list) self.assertIn(mock.call(old_heads_file), delete_if_exists.call_args_list) def test_get_project_base(self):", "test__validate_revision_validates_branches(self, validate_mock): script_dir = mock.Mock() fake_revision = FakeRevision() branch =", "script_dir = alembic_script.ScriptDirectory.from_config( self.configs[0]) self.assertEqual(set(rev for rev in revisions if", "neutron.db.migration import autogen from neutron.db.migration import cli from neutron.tests import", "def _test_upgrade_conflicts_with_revision(self, mode): with testlib_api.ExpectedException(SystemExit): self._main_test_helper( ['prog', 'upgrade', '--%s revision1'", "self.assertTrue(contract.downgrade_ops.is_empty()) def _get_regex(s): s = textwrap.dedent(s) s = re.escape(s) #", "in range(5)] e_revs[3].module.neutron_milestone = [migration.LIBERTY] walk_mock.return_value = c_revs + e_revs", "branch = cli.MIGRATION_BRANCHES[0] fake_revision.path = os.path.join('/fake/path', branch) cli._validate_revision(script_dir, fake_revision) validate_mock.assert_called_with(", "config.set_main_option('script_location', 'a.b.c:d') proj_base = cli._get_project_base(config) self.assertEqual('a', proj_base) def test_get_root_versions_dir(self): config", "= mock.patch( 'alembic.context.is_offline_mode', return_value=False).start() self.mock_alembic_is_offline.return_value = False self.mock_sa_inspector = mock.patch(", "mock.patch('os.path.exists').start() expected_kwargs = [{ 'message': 'message', 'sql': False, 'autogenerate': True,", "test_upgrade_expand_conflicts_with_revision(self): self._test_upgrade_conflicts_with_revision('expand') def 
# neutron/tests/unit/db/test_migration.py
# Copyright 2012 New Dream Network, LLC (DreamHost)
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import copy
import os
import re
import sys
import textwrap

from alembic.autogenerate import api as alembic_ag_api
from alembic import config as alembic_config
from alembic.operations import ops as alembic_ops
from alembic import script as alembic_script
import fixtures
import mock
from neutron_lib.utils import helpers
from oslo_utils import fileutils
import pkg_resources
import sqlalchemy as sa
from testtools import matchers

from neutron.conf.db import migration_cli
from neutron.db import migration
from neutron.db.migration import autogen
from neutron.db.migration import cli
from neutron.tests import base
from neutron.tests import tools
from neutron.tests.unit import testlib_api


class FakeRevision(object):
    path = 'fakepath'

    def __init__(self, labels=None, down_revision=None, is_branch_point=False):
        if not labels:
            labels = set()
        self.branch_labels = labels
        self.down_revision = down_revision
        self.is_branch_point = is_branch_point
        self.revision = helpers.get_random_string(10)
        self.module = mock.MagicMock()


class MigrationEntrypointsMemento(fixtures.Fixture):
    '''Create a copy of the migration entrypoints map so it can be restored
       during test cleanup.
    '''

    def _setUp(self):
        self.ep_backup = {}
        for proj, ep in migration_cli.migration_entrypoints.items():
            self.ep_backup[proj] = copy.copy(ep)
        self.addCleanup(self.restore)

    def restore(self):
        migration_cli.migration_entrypoints = self.ep_backup


class TestDbMigration(base.BaseTestCase):

    def setUp(self):
        super(TestDbMigration, self).setUp()
        mock.patch('alembic.op.get_bind').start()
        self.mock_alembic_is_offline = mock.patch(
            'alembic.context.is_offline_mode', return_value=False).start()
        self.mock_alembic_is_offline.return_value = False
        self.mock_sa_inspector = mock.patch(
            'sqlalchemy.engine.reflection.Inspector').start()

    def _prepare_mocked_sqlalchemy_inspector(self):
        mock_inspector = mock.MagicMock()
        mock_inspector.get_table_names.return_value = ['foo', 'bar']
        mock_inspector.get_columns.return_value = [{'name': 'foo_column'},
                                                   {'name': 'bar_column'}]
        self.mock_sa_inspector.from_engine.return_value = mock_inspector

    def test_schema_has_table(self):
        self._prepare_mocked_sqlalchemy_inspector()
        self.assertTrue(migration.schema_has_table('foo'))

    def test_schema_has_table_raises_if_offline(self):
        self.mock_alembic_is_offline.return_value = True
        self.assertRaises(RuntimeError, migration.schema_has_table, 'foo')

    def test_schema_has_column_missing_table(self):
        self._prepare_mocked_sqlalchemy_inspector()
        self.assertFalse(migration.schema_has_column('meh', 'meh'))

    def test_schema_has_column(self):
        self._prepare_mocked_sqlalchemy_inspector()
        self.assertTrue(migration.schema_has_column('foo', 'foo_column'))

    def test_schema_has_column_raises_if_offline(self):
        self.mock_alembic_is_offline.return_value = True
        self.assertRaises(RuntimeError, migration.schema_has_column,
                          'foo', 'foo_col')

    def test_schema_has_column_missing_column(self):
        self._prepare_mocked_sqlalchemy_inspector()
        self.assertFalse(migration.schema_has_column(
            'foo', column_name='meh'))


class TestCli(base.BaseTestCase):

    def setUp(self):
        super(TestCli, self).setUp()
        self.do_alembic_cmd_p = mock.patch.object(cli, 'do_alembic_command')
        self.do_alembic_cmd = self.do_alembic_cmd_p.start()
        self.mock_alembic_err = mock.patch('alembic.util.err').start()
        self.mock_alembic_warn = mock.patch('alembic.util.warn').start()
        self.mock_alembic_err.side_effect = SystemExit

        def mocked_root_dir(cfg):
            return os.path.join('/fake/dir', cli._get_project_base(cfg))
        mock_root = mock.patch.object(cli, '_get_package_root_dir').start()
        mock_root.side_effect = mocked_root_dir

        # Avoid creating fake directories
        mock.patch('oslo_utils.fileutils.ensure_tree').start()

        # Set up some configs and entrypoints for tests to chew on
        self.configs = []
        self.projects = ('neutron', 'networking-foo', 'neutron-fwaas')
        ini = os.path.join(os.path.dirname(cli.__file__), 'alembic.ini')
        self.useFixture(MigrationEntrypointsMemento())
        migration_cli.migration_entrypoints = {}
        for project in self.projects:
            config = alembic_config.Config(ini)
            config.set_main_option('neutron_project', project)
            module_name = project.replace('-', '_') + '.db.migration'
            attrs = ('alembic_migrations',)
            script_location = ':'.join([module_name, attrs[0]])
            config.set_main_option('script_location', script_location)
            self.configs.append(config)
            entrypoint = pkg_resources.EntryPoint(project,
                                                  module_name,
                                                  attrs=attrs)
            migration_cli.migration_entrypoints[project] = entrypoint

    def _main_test_helper(self, argv, func_name, exp_kwargs=[{}]):
        with mock.patch.object(sys, 'argv', argv),\
                mock.patch.object(cli, 'run_sanity_checks'),\
                mock.patch.object(cli, 'validate_revisions'):
            cli.main()

            def _append_version_path(args):
                args = copy.copy(args)
                if 'autogenerate' in args and not args['autogenerate']:
                    args['version_path'] = mock.ANY
                return args

            self.do_alembic_cmd.assert_has_calls(
                [mock.call(mock.ANY, func_name, **_append_version_path(kwargs))
                 for kwargs in exp_kwargs]
            )

    def test_stamp(self):
        self._main_test_helper(
            ['prog', 'stamp', 'foo'],
            'stamp',
            [{'revision': 'foo', 'sql': False}]
        )

        self._main_test_helper(
            ['prog', 'stamp', 'foo', '--sql'],
            'stamp',
            [{'revision': 'foo', 'sql': True}]
        )

    def _validate_cmd(self, cmd):
        self._main_test_helper(
            ['prog', cmd],
            cmd,
            [{'verbose': False}])

        self._main_test_helper(
            ['prog', cmd, '--verbose'],
            cmd,
            [{'verbose': True}])

    def test_branches(self):
        self._validate_cmd('branches')

    def test_current(self):
        self._validate_cmd('current')

    def test_history(self):
        self._validate_cmd('history')

    def test_heads(self):
        self._validate_cmd('heads')

    def test_check_migration(self):
        with mock.patch.object(cli, 'validate_head_files') as validate:
            self._main_test_helper(['prog', 'check_migration'], 'branches')
            self.assertEqual(len(self.projects), validate.call_count)

    def _test_database_sync_revision(self, separate_branches=True):
        with mock.patch.object(cli, 'update_head_files') as update:
            if separate_branches:
                mock.patch('os.path.exists').start()
            expected_kwargs = [{
                'message': 'message', 'sql': False, 'autogenerate': True,
            }]
            self._main_test_helper(
                ['prog', 'revision', '--autogenerate', '-m', 'message'],
                'revision',
                expected_kwargs
            )
            self.assertEqual(len(self.projects), update.call_count)
            update.reset_mock()

            expected_kwargs = [{
                'message': 'message', 'sql': True, 'autogenerate': False,
                'head': cli._get_branch_head(branch)
            } for branch in cli.MIGRATION_BRANCHES]
            for kwarg in expected_kwargs:
                kwarg['autogenerate'] = False
                kwarg['sql'] = True

            self._main_test_helper(
                ['prog', 'revision', '--sql', '-m', 'message'],
                'revision',
                expected_kwargs
            )
            self.assertEqual(len(self.projects), update.call_count)
            update.reset_mock()

            expected_kwargs = [{
                'message': 'message',
                'sql': False,
                'autogenerate': False,
                'head': 'expand@head'
            }]
            self._main_test_helper(
                ['prog', 'revision', '-m', 'message', '--expand'],
                'revision',
                expected_kwargs
            )
            self.assertEqual(len(self.projects), update.call_count)
            update.reset_mock()

            for kwarg in expected_kwargs:
                kwarg['head'] = 'contract@head'

            self._main_test_helper(
                ['prog', 'revision', '-m', 'message', '--contract'],
                'revision',
                expected_kwargs
            )
            self.assertEqual(len(self.projects), update.call_count)

    def test_database_sync_revision(self):
        self._test_database_sync_revision()

    def test_database_sync_revision_no_branches(self):
        # Test that old branchless approach is still supported
        self._test_database_sync_revision(separate_branches=False)

    def test_upgrade_revision(self):
        self._main_test_helper(
            ['prog', 'upgrade', '--sql', 'head'],
            'upgrade',
            [{'desc': None, 'revision': 'heads', 'sql': True}]
        )

    def test_upgrade_delta(self):
        self._main_test_helper(
            ['prog', 'upgrade', '--delta', '3'],
            'upgrade',
            [{'desc': None, 'revision': '+3', 'sql': False}]
        )

    def test_upgrade_revision_delta(self):
        self._main_test_helper(
            ['prog', 'upgrade', 'kilo', '--delta', '3'],
            'upgrade',
            [{'desc': None, 'revision': 'kilo+3', 'sql': False}]
        )

    def test_upgrade_expand(self):
        self._main_test_helper(
            ['prog', 'upgrade', '--expand'],
            'upgrade',
            [{'desc': cli.EXPAND_BRANCH,
              'revision': 'expand@head',
              'sql': False}]
        )

    def test_upgrade_expand_contract_are_mutually_exclusive(self):
        with testlib_api.ExpectedException(SystemExit):
            self._main_test_helper(
                ['prog', 'upgrade', '--expand --contract'], 'upgrade')

    def _test_upgrade_conflicts_with_revision(self, mode):
        with testlib_api.ExpectedException(SystemExit):
            self._main_test_helper(
                ['prog', 'upgrade', '--%s revision1' % mode], 'upgrade')

    def _test_upgrade_conflicts_with_delta(self, mode):
        with testlib_api.ExpectedException(SystemExit):
            self._main_test_helper(
                ['prog', 'upgrade', '--%s +3' % mode], 'upgrade')

    def _test_revision_autogenerate_conflicts_with_branch(self, branch):
        with testlib_api.ExpectedException(SystemExit):
            self._main_test_helper(
                ['prog', 'revision', '--autogenerate', '--%s' % branch],
                'revision')

    def test_revision_autogenerate_conflicts_with_expand(self):
        self._test_revision_autogenerate_conflicts_with_branch(
            cli.EXPAND_BRANCH)

    def test_revision_autogenerate_conflicts_with_contract(self):
        self._test_revision_autogenerate_conflicts_with_branch(
            cli.CONTRACT_BRANCH)

    def test_upgrade_expand_conflicts_with_revision(self):
        self._test_upgrade_conflicts_with_revision('expand')

    def test_upgrade_contract_conflicts_with_revision(self):
        self._test_upgrade_conflicts_with_revision('contract')

    def test_upgrade_expand_conflicts_with_delta(self):
        self._test_upgrade_conflicts_with_delta('expand')

    def test_upgrade_contract_conflicts_with_delta(self):
        self._test_upgrade_conflicts_with_delta('contract')

    def test_upgrade_contract(self):
        self._main_test_helper(
            ['prog', 'upgrade', '--contract'],
            'upgrade',
            [{'desc': cli.CONTRACT_BRANCH,
              'revision': 'contract@head',
              'sql': False}]
        )

    @mock.patch('alembic.script.ScriptDirectory.walk_revisions')
    def test_upgrade_milestone_expand_before_contract(self, walk_mock):
        c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in range(5)]
        c_revs[1].module.neutron_milestone = [migration.LIBERTY]
        e_revs = [FakeRevision(labels={cli.EXPAND_BRANCH}) for r in range(5)]
        e_revs[3].module.neutron_milestone = [migration.LIBERTY]
        walk_mock.return_value = c_revs + e_revs
        self._main_test_helper(
            ['prog', '--subproject', 'neutron', 'upgrade', 'liberty'],
            'upgrade',
            [{'desc': cli.EXPAND_BRANCH,
              'revision': e_revs[3].revision,
              'sql': False},
             {'desc': cli.CONTRACT_BRANCH,
              'revision': c_revs[1].revision,
              'sql': False}]
        )

    def assert_command_fails(self, command):
        # Avoid cluttering stdout with argparse error messages
        mock.patch('argparse.ArgumentParser._print_message').start()
        with mock.patch.object(sys, 'argv', command), mock.patch.object(
                cli, 'run_sanity_checks'):
            self.assertRaises(SystemExit, cli.main)

    def test_downgrade_fails(self):
        self.assert_command_fails(['prog', 'downgrade', '--sql', 'juno'])

    def test_upgrade_negative_relative_revision_fails(self):
        self.assert_command_fails(['prog', 'upgrade', '-2'])

    def test_upgrade_negative_delta_fails(self):
        self.assert_command_fails(['prog', 'upgrade', '--delta', '-2'])

    def test_upgrade_rejects_delta_with_relative_revision(self):
        self.assert_command_fails(['prog', 'upgrade', '+2', '--delta', '3'])

    def _test_validate_head_files_helper(self, heads, contract_head='',
                                         expand_head=''):
        fake_config = self.configs[0]
        head_files_not_exist = (contract_head == expand_head == '')
        with mock.patch('alembic.script.ScriptDirectory.from_config') as fc,\
                mock.patch('os.path.exists') as os_mock:
            if head_files_not_exist:
                os_mock.return_value = False
            else:
                os_mock.return_value = True

            fc.return_value.get_heads.return_value = heads

            revs = {heads[0]: FakeRevision(labels=cli.CONTRACT_BRANCH),
                    heads[1]: FakeRevision(labels=cli.EXPAND_BRANCH)}
            fc.return_value.get_revision.side_effect = revs.__getitem__
            mock_open_con = self.useFixture(
                tools.OpenFixture(cli._get_contract_head_file_path(
                    fake_config), contract_head + '\n')).mock_open
            mock_open_ex = self.useFixture(
                tools.OpenFixture(cli._get_expand_head_file_path(
                    fake_config), expand_head + '\n')).mock_open

            if contract_head in heads and expand_head in heads:
                cli.validate_head_files(fake_config)
            elif head_files_not_exist:
                cli.validate_head_files(fake_config)
                self.assertTrue(self.mock_alembic_warn.called)
            else:
                self.assertRaises(
                    SystemExit,
                    cli.validate_head_files,
                    fake_config
                )
                self.assertTrue(self.mock_alembic_err.called)

            if contract_head in heads and expand_head in heads:
                mock_open_ex.assert_called_with(
                    cli._get_expand_head_file_path(fake_config))
                mock_open_con.assert_called_with(
                    cli._get_contract_head_file_path(fake_config))

            if not head_files_not_exist:
                fc.assert_called_once_with(fake_config)

    def test_validate_head_files_success(self):
        self._test_validate_head_files_helper(['a', 'b'], contract_head='a',
                                              expand_head='b')

    def test_validate_head_files_missing_file(self):
        self._test_validate_head_files_helper(['a', 'b'])

    def test_validate_head_files_wrong_contents(self):
        self._test_validate_head_files_helper(['a', 'b'], contract_head='c',
                                              expand_head='d')

    @mock.patch.object(fileutils, 'delete_if_exists')
    def test_update_head_files_success(self, *mocks):
        heads = ['a', 'b']
        mock_open_con = self.useFixture(
            tools.OpenFixture(cli._get_contract_head_file_path(
                self.configs[0]))).mock_open
        mock_open_ex = self.useFixture(
            tools.OpenFixture(cli._get_expand_head_file_path(
                self.configs[0]))).mock_open
        with mock.patch('alembic.script.ScriptDirectory.from_config') as fc:
            fc.return_value.get_heads.return_value = heads
            revs = {heads[0]: FakeRevision(labels=cli.CONTRACT_BRANCH),
                    heads[1]: FakeRevision(labels=cli.EXPAND_BRANCH)}
            fc.return_value.get_revision.side_effect = revs.__getitem__
            cli.update_head_files(self.configs[0])
            mock_open_con.return_value.write.assert_called_with(
                heads[0] + '\n')
            mock_open_ex.return_value.write.assert_called_with(heads[1] + '\n')

            old_head_file = cli._get_head_file_path(
                self.configs[0])
            old_heads_file = cli._get_heads_file_path(
                self.configs[0])
            delete_if_exists = mocks[0]
            self.assertIn(mock.call(old_head_file),
                          delete_if_exists.call_args_list)
            self.assertIn(mock.call(old_heads_file),
                          delete_if_exists.call_args_list)

    def test_get_project_base(self):
        config = alembic_config.Config()
        config.set_main_option('script_location', 'a.b.c:d')
        proj_base = cli._get_project_base(config)
        self.assertEqual('a', proj_base)

    def test_get_root_versions_dir(self):
        config = alembic_config.Config()
        config.set_main_option('script_location', 'a.b.c:d')
        versions_dir = cli._get_root_versions_dir(config)
        self.assertEqual('/fake/dir/a/a/b/c/d/versions', versions_dir)

    def test_get_subproject_script_location(self):
        foo_ep = cli._get_subproject_script_location('networking-foo')
        expected = 'networking_foo.db.migration:alembic_migrations'
        self.assertEqual(expected, foo_ep)

    def test_get_subproject_script_location_not_installed(self):
        self.assertRaises(
            SystemExit, cli._get_subproject_script_location, 'not-installed')

    def test_get_subproject_base_not_installed(self):
        self.assertRaises(
            SystemExit, cli._get_subproject_base, 'not-installed')

    def test__compare_labels_ok(self):
        labels = {'label1', 'label2'}
        fake_revision = FakeRevision(labels)
        cli._compare_labels(fake_revision, {'label1', 'label2'})

    def test__compare_labels_fail_unexpected_labels(self):
        labels = {'label1', 'label2', 'label3'}
        fake_revision = FakeRevision(labels)
        self.assertRaises(
            SystemExit,
            cli._compare_labels, fake_revision, {'label1', 'label2'})

    @mock.patch.object(cli, '_compare_labels')
    def test__validate_single_revision_labels_branchless_fail_different_labels(
            self, compare_mock):
        fake_down_revision = FakeRevision()
        fake_revision = FakeRevision(down_revision=fake_down_revision)

        script_dir = mock.Mock()
        script_dir.get_revision.return_value = fake_down_revision
        cli._validate_single_revision_labels(script_dir, fake_revision,
                                             label=None)

        expected_labels = set()
        compare_mock.assert_has_calls(
            [mock.call(revision, expected_labels)
             for revision in (fake_revision, fake_down_revision)]
        )

    @mock.patch.object(cli, '_compare_labels')
    def test__validate_single_revision_labels_branches_fail_different_labels(
            self, compare_mock):
        fake_down_revision = FakeRevision()
        fake_revision = FakeRevision(down_revision=fake_down_revision)

        script_dir = mock.Mock()
        script_dir.get_revision.return_value = fake_down_revision
        cli._validate_single_revision_labels(
            script_dir, fake_revision, label='fakebranch')

        expected_labels = {'fakebranch'}
        compare_mock.assert_has_calls(
            [mock.call(revision, expected_labels)
             for revision in (fake_revision, fake_down_revision)]
        )

    @mock.patch.object(cli, '_validate_single_revision_labels')
    def test__validate_revision_validates_branches(self, validate_mock):
        script_dir = mock.Mock()
        fake_revision = FakeRevision()
        branch = cli.MIGRATION_BRANCHES[0]
        fake_revision.path = os.path.join('/fake/path', branch)
        cli._validate_revision(script_dir, fake_revision)
        validate_mock.assert_called_with(
            script_dir, fake_revision, label=branch)

    @mock.patch.object(cli, '_validate_single_revision_labels')
    def test__validate_revision_validates_branchless_migrations(
            self, validate_mock):
        script_dir = mock.Mock()
        fake_revision = FakeRevision()
        cli._validate_revision(script_dir, fake_revision)
        validate_mock.assert_called_with(script_dir, fake_revision)

    @mock.patch.object(cli, '_validate_revision')
    @mock.patch('alembic.script.ScriptDirectory.walk_revisions')
    def test_validate_revisions_walks_thru_all_revisions(
            self, walk_mock, validate_mock):
        revisions = [FakeRevision() for i in range(10)]
        walk_mock.return_value = revisions
        cli.validate_revisions(self.configs[0])
        validate_mock.assert_has_calls(
            [mock.call(mock.ANY, revision) for revision in revisions]
        )

    @mock.patch.object(cli, '_validate_revision')
    @mock.patch('alembic.script.ScriptDirectory.walk_revisions')
    def test_validate_revisions_fails_on_multiple_branch_points(
            self, walk_mock, validate_mock):
        revisions = [FakeRevision(is_branch_point=True) for i in range(2)]
        walk_mock.return_value = revisions
        self.assertRaises(
            SystemExit, cli.validate_revisions, self.configs[0])

    @mock.patch('alembic.script.ScriptDirectory.walk_revisions')
    def test__get_branch_points(self, walk_mock):
        revisions = [FakeRevision(is_branch_point=tools.get_random_boolean)
                     for i in range(50)]
        walk_mock.return_value = revisions
        script_dir = alembic_script.ScriptDirectory.from_config(
            self.configs[0])
        self.assertEqual(set(rev for rev in revisions if rev.is_branch_point),
                         set(cli._get_branch_points(script_dir)))

    @mock.patch.object(cli, '_get_version_branch_path')
    def test_autogen_process_directives(self, get_version_branch_path):

        get_version_branch_path.side_effect = lambda cfg, release, branch: (
            "/foo/expand" if branch == 'expand' else "/foo/contract")

        migration_script = alembic_ops.MigrationScript(
            'eced083f5df',
            # these directives will be split into separate
            # expand/contract scripts
            alembic_ops.UpgradeOps(
                ops=[
                    alembic_ops.CreateTableOp(
                        'organization',
                        [
                            sa.Column('id', sa.Integer(), primary_key=True),
                            sa.Column('name', sa.String(50), nullable=False)
                        ]
                    ),
                    alembic_ops.ModifyTableOps(
                        'user',
                        ops=[
                            alembic_ops.AddColumnOp(
                                'user',
                                sa.Column('organization_id', sa.Integer())
                            ),
                            alembic_ops.CreateForeignKeyOp(
                                'org_fk', 'user', 'organization',
                                ['organization_id'], ['id']
                            ),
                            alembic_ops.DropConstraintOp(
                                'user', 'uq_user_org'
                            ),
                            alembic_ops.DropColumnOp(
                                'user', 'organization_name'
                            )
                        ]
                    )
                ]
            ),
            # these will be discarded
            alembic_ops.DowngradeOps(
                ops=[
                    alembic_ops.AddColumnOp(
                        'user',
                        sa.Column(
                            'organization_name', sa.String(50), nullable=True)
                    ),
                    alembic_ops.CreateUniqueConstraintOp(
                        'uq_user_org', 'user',
                        ['user_name', 'organization_name']
                    ),
                    alembic_ops.ModifyTableOps(
                        'user',
                        ops=[
                            alembic_ops.DropConstraintOp('org_fk', 'user'),
                            alembic_ops.DropColumnOp('user', 'organization_id')
                        ]
                    )
                ]
            ),
            message='create the organization table and '
                    'replace user.organization_name'
        )

        directives = [migration_script]
        autogen.process_revision_directives(
            mock.Mock(), mock.Mock(), directives
        )

        expand = directives[0]
        contract = directives[1]
        self.assertEqual("/foo/expand", expand.version_path)
        self.assertEqual("/foo/contract", contract.version_path)
        self.assertTrue(expand.downgrade_ops.is_empty())
        self.assertTrue(contract.downgrade_ops.is_empty())

        def _get_regex(s):
            s = textwrap.dedent(s)
            s = re.escape(s)
            # alembic 0.8.9 added additional leading '# ' before comments
            return s.replace('\\#\\#\\#\\ ', '(# )?### ')

        expected_regex = ("""\
        ### commands auto generated by Alembic - please adjust! ###
            op.create_table('organization',
            sa.Column('id', sa.Integer(), nullable=False),
            sa.Column('name', sa.String(length=50), nullable=False),
            sa.PrimaryKeyConstraint('id')
            )
            op.add_column('user', """
                          """sa.Column('organization_id', sa.Integer(), nullable=True))
            op.create_foreign_key('org_fk', 'user', """
                          """'organization', ['organization_id'], ['id'])
            ### end Alembic commands ###""")
        self.assertThat(
            alembic_ag_api.render_python_code(expand.upgrade_ops),
            matchers.MatchesRegex(_get_regex(expected_regex)))

        expected_regex = ("""\
        ### commands auto generated by Alembic - please adjust! ###
            op.drop_constraint('user', 'uq_user_org', type_=None)
            op.drop_column('user', 'organization_name')
            ### end Alembic commands ###""")
        self.assertThat(
            alembic_ag_api.render_python_code(contract.upgrade_ops),
            matchers.MatchesRegex(_get_regex(expected_regex)))

    @mock.patch('alembic.script.ScriptDirectory.walk_revisions')
    def test__find_milestone_revisions_one_branch(self, walk_mock):
        c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in range(5)]
        c_revs[1].module.neutron_milestone = [migration.LIBERTY]

        walk_mock.return_value = c_revs
        m = cli._find_milestone_revisions(self.configs[0], 'liberty',
                                          cli.CONTRACT_BRANCH)
        self.assertEqual(1, len(m))

        m = cli._find_milestone_revisions(self.configs[0], 'liberty',
                                          cli.EXPAND_BRANCH)
        self.assertEqual(0, len(m))

    @mock.patch('alembic.script.ScriptDirectory.walk_revisions')
    def test__find_milestone_revisions_two_branches(self, walk_mock):
        c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in range(5)]
        c_revs[1].module.neutron_milestone = [migration.LIBERTY]
        e_revs = [FakeRevision(labels={cli.EXPAND_BRANCH}) for r in range(5)]
        e_revs[3].module.neutron_milestone = [migration.LIBERTY]

        walk_mock.return_value = c_revs + e_revs
        m = cli._find_milestone_revisions(self.configs[0], 'liberty')
        self.assertEqual(2, len(m))

        m = cli._find_milestone_revisions(self.configs[0], 'mitaka')
        self.assertEqual(0, len(m))

    @mock.patch('alembic.script.ScriptDirectory.walk_revisions')
    def test__find_milestone_revisions_branchless(self, walk_mock):
        revisions = [FakeRevision() for r in range(5)]
        revisions[2].module.neutron_milestone = [migration.LIBERTY]

        walk_mock.return_value = revisions
        m = cli._find_milestone_revisions(self.configs[0], 'liberty')
        self.assertEqual(1, len(m))

        m = cli._find_milestone_revisions(self.configs[0], 'mitaka')
        self.assertEqual(0, len(m))


class TestSafetyChecks(base.BaseTestCase):

    def test_validate_revisions(self, *mocks):
        cli.validate_revisions(cli.get_neutron_config())
### op.create_table('organization', sa.Column('id', sa.Integer(),", "[mock.call(revision, expected_labels) for revision in (fake_revision, fake_down_revision)] ) @mock.patch.object(cli, '_validate_single_revision_labels')", "SystemExit def mocked_root_dir(cfg): return os.path.join('/fake/dir', cli._get_project_base(cfg)) mock_root = mock.patch.object(cli, '_get_package_root_dir').start()", "# Test that old branchless approach is still supported self._test_database_sync_revision(separate_branches=False)", "self, validate_mock): script_dir = mock.Mock() fake_revision = FakeRevision() cli._validate_revision(script_dir, fake_revision)", "[migration.LIBERTY] walk_mock.return_value = revisions m = cli._find_milestone_revisions(self.configs[0], 'liberty') self.assertEqual(1, len(m))", "# Avoid cluttering stdout with argparse error messages mock.patch('argparse.ArgumentParser._print_message').start() with", "in args and not args['autogenerate']: args['version_path'] = mock.ANY return args", "= helpers.get_random_string(10) self.module = mock.MagicMock() class MigrationEntrypointsMemento(fixtures.Fixture): '''Create a copy", "cleanup. ''' def _setUp(self): self.ep_backup = {} for proj, ep", "'sql': False}] ) def test_upgrade_revision_delta(self): self._main_test_helper( ['prog', 'upgrade', 'kilo', '--delta',", "cli._get_contract_head_file_path(fake_config)) if not head_files_not_exist: fc.assert_called_once_with(fake_config) def test_validate_head_files_success(self): self._test_validate_head_files_helper(['a', 'b'], contract_head='a',", "config.set_main_option('script_location', script_location) self.configs.append(config) entrypoint = pkg_resources.EntryPoint(project, module_name, attrs=attrs) migration_cli.migration_entrypoints[project] =", "return_value=False).start() self.mock_alembic_is_offline.return_value = False self.mock_sa_inspector = mock.patch( 'sqlalchemy.engine.reflection.Inspector').start() def _prepare_mocked_sqlalchemy_inspector(self):", "self.assertIn(mock.call(old_heads_file), delete_if_exists.call_args_list) def test_get_project_base(self): config = alembic_config.Config() config.set_main_option('script_location', 'a.b.c:d') proj_base", "e_revs m = cli._find_milestone_revisions(self.configs[0], 'liberty') self.assertEqual(2, len(m)) m = cli._find_milestone_revisions(self.configs[0],", "os_mock: if head_files_not_exist: os_mock.return_value = False else: os_mock.return_value = True", "m = cli._find_milestone_revisions(self.configs[0], 'liberty') self.assertEqual(2, len(m)) m = cli._find_milestone_revisions(self.configs[0], 'mitaka')", "<filename>neutron/tests/unit/db/test_migration.py # Copyright 2012 New Dream Network, LLC (DreamHost) #", "def test_autogen_process_directives(self, get_version_branch_path): get_version_branch_path.side_effect = lambda cfg, release, branch: (", "import autogen from neutron.db.migration import cli from neutron.tests import base", "self.do_alembic_cmd = self.do_alembic_cmd_p.start() self.mock_alembic_err = mock.patch('alembic.util.err').start() self.mock_alembic_warn = mock.patch('alembic.util.warn').start() self.mock_alembic_err.side_effect", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "= project.replace('-', '_') + '.db.migration' attrs = ('alembic_migrations',) script_location =", "'--autogenerate', '-m', 'message'], 'revision', expected_kwargs ) self.assertEqual(len(self.projects), update.call_count) update.reset_mock() expected_kwargs", "expected_kwargs: kwarg['autogenerate'] = False kwarg['sql'] = True 
self._main_test_helper( ['prog', 'revision',", "and expand_head in heads: mock_open_ex.assert_called_with( cli._get_expand_head_file_path(fake_config)) mock_open_con.assert_called_with( cli._get_contract_head_file_path(fake_config)) if not", "fake_revision, {'label1', 'label2'}) @mock.patch.object(cli, '_compare_labels') def test__validate_single_revision_labels_branchless_fail_different_labels( self, compare_mock): fake_down_revision", "cmd, [{'verbose': False}]) self._main_test_helper( ['prog', cmd, '--verbose'], cmd, [{'verbose': True}])", "'--verbose'], cmd, [{'verbose': True}]) def test_branches(self): self._validate_cmd('branches') def test_current(self): self._validate_cmd('current')", "'message', '--expand'], 'revision', expected_kwargs ) self.assertEqual(len(self.projects), update.call_count) update.reset_mock() for kwarg", "= [migration.LIBERTY] walk_mock.return_value = revisions m = cli._find_milestone_revisions(self.configs[0], 'liberty') self.assertEqual(1,", "foo_ep) def test_get_subproject_script_location_not_installed(self): self.assertRaises( SystemExit, cli._get_subproject_script_location, 'not-installed') def test_get_subproject_base_not_installed(self): self.assertRaises(", "commands ###\"\"\") self.assertThat( alembic_ag_api.render_python_code(contract.upgrade_ops), matchers.MatchesRegex(_get_regex(expected_regex))) @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test__find_milestone_revisions_one_branch(self, walk_mock): c_revs", "from alembic import config as alembic_config from alembic.operations import ops", "@mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test_upgrade_milestone_expand_before_contract(self, walk_mock): c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in", "def test_check_migration(self): with mock.patch.object(cli, 'validate_head_files') as validate: self._main_test_helper(['prog', 'check_migration'], 'branches')", "cmd): self._main_test_helper( ['prog', cmd], cmd, [{'verbose': False}]) self._main_test_helper( ['prog', cmd,", "mock_inspector.get_table_names.return_value = ['foo', 'bar'] mock_inspector.get_columns.return_value = [{'name': 'foo_column'}, {'name': 'bar_column'}]", "[FakeRevision(labels={cli.EXPAND_BRANCH}) for r in range(5)] e_revs[3].module.neutron_milestone = [migration.LIBERTY] walk_mock.return_value =", "def test_schema_has_column_raises_if_offline(self): self.mock_alembic_is_offline.return_value = True self.assertRaises(RuntimeError, migration.schema_has_column, 'foo', 'foo_col') def", "self.do_alembic_cmd_p.start() self.mock_alembic_err = mock.patch('alembic.util.err').start() self.mock_alembic_warn = mock.patch('alembic.util.warn').start() self.mock_alembic_err.side_effect = SystemExit", "from oslo_utils import fileutils import pkg_resources import sqlalchemy as sa", "alembic_ops.CreateForeignKeyOp( 'org_fk', 'user', 'organization', ['organization_id'], ['id'] ), alembic_ops.DropConstraintOp( 'user', 'uq_user_org'", "neutron.tests.unit import testlib_api class FakeConfig(object): service = '' class FakeRevision(object):", "super(TestDbMigration, self).setUp() mock.patch('alembic.op.get_bind').start() self.mock_alembic_is_offline = mock.patch( 'alembic.context.is_offline_mode', return_value=False).start() self.mock_alembic_is_offline.return_value =", "branch in cli.MIGRATION_BRANCHES] for kwarg in expected_kwargs: kwarg['autogenerate'] = False", "self.assertFalse(migration.schema_has_column( 'foo', column_name='meh')) class TestCli(base.BaseTestCase): def setUp(self): super(TestCli, 
self).setUp() self.do_alembic_cmd_p", "'juno']) def test_upgrade_negative_relative_revision_fails(self): self.assert_command_fails(['prog', 'upgrade', '-2']) def test_upgrade_negative_delta_fails(self): self.assert_command_fails(['prog', 'upgrade',", "= [{ 'message': 'message', 'sql': True, 'autogenerate': False, 'head': cli._get_branch_head(branch)", "2.0 (the \"License\"); you may # not use this file", "= self.useFixture( tools.OpenFixture(cli._get_expand_head_file_path( self.configs[0]))).mock_open with mock.patch('alembic.script.ScriptDirectory.from_config') as fc: fc.return_value.get_heads.return_value =", "self.assert_command_fails(['prog', 'upgrade', '+2', '--delta', '3']) def _test_validate_head_files_helper(self, heads, contract_head='', expand_head=''):", "SystemExit, cli._get_subproject_script_location, 'not-installed') def test_get_subproject_base_not_installed(self): self.assertRaises( SystemExit, cli._get_subproject_base, 'not-installed') def", "label=branch) @mock.patch.object(cli, '_validate_single_revision_labels') def test__validate_revision_validates_branchless_migrations( self, validate_mock): script_dir = mock.Mock()", "SystemExit, cli.validate_head_files, fake_config ) self.assertTrue(self.mock_alembic_err.called) if contract_head in heads and", "by applicable law or agreed to in writing, software #", "'upgrade', 'kilo', '--delta', '3'], 'upgrade', [{'desc': None, 'revision': 'kilo+3', 'sql':", "def test_heads(self): self._validate_cmd('heads') def test_check_migration(self): with mock.patch.object(cli, 'validate_head_files') as validate:", "cli.CONTRACT_BRANCH, 'revision': 'contract@head', 'sql': False}] ) @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test_upgrade_milestone_expand_before_contract(self, walk_mock):", "'\\n')).mock_open if contract_head in heads and expand_head in heads: cli.validate_head_files(fake_config)", "revisions if rev.is_branch_point), set(cli._get_branch_points(script_dir))) @mock.patch.object(cli, '_get_version_branch_path') def test_autogen_process_directives(self, get_version_branch_path): get_version_branch_path.side_effect", "% branch], 'revision') def test_revision_autogenerate_conflicts_with_expand(self): self._test_revision_autogenerate_conflicts_with_branch( cli.EXPAND_BRANCH) def test_revision_autogenerate_conflicts_with_contract(self): self._test_revision_autogenerate_conflicts_with_branch(", "self.assertEqual(\"/foo/contract\", contract.version_path) self.assertTrue(expand.downgrade_ops.is_empty()) self.assertTrue(contract.downgrade_ops.is_empty()) def _get_regex(s): s = textwrap.dedent(s) s", "else \"/foo/contract\") migration_script = alembic_ops.MigrationScript( 'eced083f5df', # these directives will", "expected = 'networking_foo.db.migration:alembic_migrations' self.assertEqual(expected, foo_ep) def test_get_subproject_script_location_not_installed(self): self.assertRaises( SystemExit, cli._get_subproject_script_location,", "def _validate_cmd(self, cmd): self._main_test_helper( ['prog', cmd], cmd, [{'verbose': False}]) self._main_test_helper(", "# All Rights Reserved. 
# # Licensed under the Apache", "= [migration_script] autogen.process_revision_directives( mock.Mock(), mock.Mock(), directives ) expand = directives[0]", "c_revs + e_revs self._main_test_helper( ['prog', '--subproject', 'neutron', 'upgrade', 'liberty'], 'upgrade',", "range(5)] e_revs[3].module.neutron_milestone = [migration.LIBERTY] walk_mock.return_value = c_revs + e_revs self._main_test_helper(", "### end Alembic commands ###\"\"\") self.assertThat( alembic_ag_api.render_python_code(expand.upgrade_ops), matchers.MatchesRegex(_get_regex(expected_regex))) expected_regex =", "FakeRevision(labels=cli.CONTRACT_BRANCH), heads[1]: FakeRevision(labels=cli.EXPAND_BRANCH)} fc.return_value.get_revision.side_effect = revs.__getitem__ cli.update_head_files(self.configs[0]) mock_open_con.return_value.write.assert_called_with( heads[0] +", "] ), alembic_ops.ModifyTableOps( 'user', ops=[ alembic_ops.AddColumnOp( 'user', sa.Column('organization_id', sa.Integer()) ),", "restored during test cleanup. ''' def _setUp(self): self.ep_backup = {}", "directives = [migration_script] autogen.process_revision_directives( mock.Mock(), mock.Mock(), directives ) expand =", "migration_cli.migration_entrypoints = {} for project in self.projects: config = alembic_config.Config(ini)", "= mock.patch( 'sqlalchemy.engine.reflection.Inspector').start() def _prepare_mocked_sqlalchemy_inspector(self): mock_inspector = mock.MagicMock() mock_inspector.get_table_names.return_value =", "applicable law or agreed to in writing, software # distributed", "entrypoints map so it can be restored during test cleanup.", "cmd], cmd, [{'verbose': False}]) self._main_test_helper( ['prog', cmd, '--verbose'], cmd, [{'verbose':", "'3'], 'upgrade', [{'desc': None, 'revision': '+3', 'sql': False}] ) def", "testlib_api.ExpectedException(SystemExit): self._main_test_helper( ['prog', 'upgrade', '--%s revision1' % mode], 'upgrade') def", "'revision', expected_kwargs ) self.assertEqual(len(self.projects), update.call_count) update.reset_mock() expected_kwargs = [{ 'message':", "re.escape(s) # alembic 0.8.9 added additional leading '# ' before", "e_revs = [FakeRevision(labels={cli.EXPAND_BRANCH}) for r in range(5)] e_revs[3].module.neutron_milestone = [migration.LIBERTY]", "WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express", "directives[1] self.assertEqual(\"/foo/expand\", expand.version_path) self.assertEqual(\"/foo/contract\", contract.version_path) self.assertTrue(expand.downgrade_ops.is_empty()) self.assertTrue(contract.downgrade_ops.is_empty()) def _get_regex(s): s", "['prog', 'upgrade', '--expand --contract'], 'upgrade') def _test_upgrade_conflicts_with_revision(self, mode): with testlib_api.ExpectedException(SystemExit):", "def __init__(self, labels=None, down_revision=None, is_branch_point=False): if not labels: labels =", "nullable=False), sa.PrimaryKeyConstraint('id') ) op.add_column('user', \"\"\" \"\"\"sa.Column('organization_id', sa.Integer(), nullable=True)) op.create_foreign_key('org_fk', 'user',", "autogen from neutron.db.migration import cli from neutron.tests import base from", "versions_dir) def test_get_subproject_script_location(self): foo_ep = cli._get_subproject_script_location('networking-foo') expected = 'networking_foo.db.migration:alembic_migrations' self.assertEqual(expected,", "def test_upgrade_revision_delta(self): self._main_test_helper( ['prog', 'upgrade', 'kilo', '--delta', '3'], 'upgrade', [{'desc':", "migration_cli from neutron.db import migration from neutron.db.migration import autogen from", "'foo', 
'sql': True}] ) def _validate_cmd(self, cmd): self._main_test_helper( ['prog', cmd],", "alembic_config.Config() config.set_main_option('script_location', 'a.b.c:d') proj_base = cli._get_project_base(config) self.assertEqual('a', proj_base) def test_get_root_versions_dir(self):", "directives ) expand = directives[0] contract = directives[1] self.assertEqual(\"/foo/expand\", expand.version_path)", "test_validate_head_files_success(self): self._test_validate_head_files_helper(['a', 'b'], contract_head='a', expand_head='b') def test_validate_head_files_missing_file(self): self._test_validate_head_files_helper(['a', 'b']) def", "as sa from testtools import matchers from neutron.conf.db import migration_cli", "neutron.tests import tools from neutron.tests.unit import testlib_api class FakeConfig(object): service", "'meh')) def test_schema_has_column(self): self._prepare_mocked_sqlalchemy_inspector() self.assertTrue(migration.schema_has_column('foo', 'foo_column')) def test_schema_has_column_raises_if_offline(self): self.mock_alembic_is_offline.return_value =", "self.assertEqual('/fake/dir/a/a/b/c/d/versions', versions_dir) def test_get_subproject_script_location(self): foo_ep = cli._get_subproject_script_location('networking-foo') expected = 'networking_foo.db.migration:alembic_migrations'", "if contract_head in heads and expand_head in heads: mock_open_ex.assert_called_with( cli._get_expand_head_file_path(fake_config))", "def test_upgrade_negative_delta_fails(self): self.assert_command_fails(['prog', 'upgrade', '--delta', '-2']) def test_upgrade_rejects_delta_with_relative_revision(self): self.assert_command_fails(['prog', 'upgrade',", "separate_branches: mock.patch('os.path.exists').start() expected_kwargs = [{ 'message': 'message', 'sql': False, 'autogenerate':", "fake_revision) validate_mock.assert_called_with( script_dir, fake_revision, label=branch) @mock.patch.object(cli, '_validate_single_revision_labels') def test__validate_revision_validates_branchless_migrations( self,", "### op.drop_constraint('user', 'uq_user_org', type_=None) op.drop_column('user', 'organization_name') ### end Alembic commands", "test_schema_has_column_missing_table(self): self._prepare_mocked_sqlalchemy_inspector() self.assertFalse(migration.schema_has_column('meh', 'meh')) def test_schema_has_column(self): self._prepare_mocked_sqlalchemy_inspector() self.assertTrue(migration.schema_has_column('foo', 'foo_column')) def", "# License for the specific language governing permissions and limitations", "False, 'autogenerate': True, }] self._main_test_helper( ['prog', 'revision', '--autogenerate', '-m', 'message'],", "sa from testtools import matchers from neutron.conf.db import migration_cli from", "alembic_ag_api from alembic import config as alembic_config from alembic.operations import", "proj, ep in migration_cli.migration_entrypoints.items(): self.ep_backup[proj] = copy.copy(ep) self.addCleanup(self.restore) def restore(self):", "e_revs[3].module.neutron_milestone = [migration.LIBERTY] walk_mock.return_value = c_revs + e_revs self._main_test_helper( ['prog',", "testlib_api.ExpectedException(SystemExit): self._main_test_helper( ['prog', 'revision', '--autogenerate', '--%s' % branch], 'revision') def", "'b'], contract_head='c', expand_head='d') @mock.patch.object(fileutils, 'delete_if_exists') def test_update_head_files_success(self, *mocks): heads =", "'message', '--contract'], 'revision', expected_kwargs ) self.assertEqual(len(self.projects), update.call_count) def test_database_sync_revision(self): 
self._test_database_sync_revision()", "= c_revs m = cli._find_milestone_revisions(self.configs[0], 'liberty', cli.CONTRACT_BRANCH) self.assertEqual(1, len(m)) m", "self.useFixture( tools.OpenFixture(cli._get_expand_head_file_path( fake_config), expand_head + '\\n')).mock_open if contract_head in heads", "'(# )?### ') expected_regex = (\"\"\"\\ ### commands auto generated", "self.configs[0] head_files_not_exist = (contract_head == expand_head == '') with mock.patch('alembic.script.ScriptDirectory.from_config')", "test_upgrade_negative_delta_fails(self): self.assert_command_fails(['prog', 'upgrade', '--delta', '-2']) def test_upgrade_rejects_delta_with_relative_revision(self): self.assert_command_fails(['prog', 'upgrade', '+2',", "cli.CONTRACT_BRANCH) self.assertEqual(1, len(m)) m = cli._find_milestone_revisions(self.configs[0], 'liberty', cli.EXPAND_BRANCH) self.assertEqual(0, len(m))", "import migration_cli from neutron.db import migration from neutron.db.migration import autogen", "in range(50)] walk_mock.return_value = revisions script_dir = alembic_script.ScriptDirectory.from_config( self.configs[0]) self.assertEqual(set(rev", "'bar'] mock_inspector.get_columns.return_value = [{'name': 'foo_column'}, {'name': 'bar_column'}] self.mock_sa_inspector.from_engine.return_value = mock_inspector", "Copyright 2012 New Dream Network, LLC (DreamHost) # All Rights", "), alembic_ops.ModifyTableOps( 'user', ops=[ alembic_ops.DropConstraintOp('org_fk', 'user'), alembic_ops.DropColumnOp('user', 'organization_id') ] ),", "heads[1]: FakeRevision(labels=cli.EXPAND_BRANCH)} fc.return_value.get_revision.side_effect = revs.__getitem__ mock_open_con = self.useFixture( tools.OpenFixture(cli._get_contract_head_file_path( fake_config),", "delete_if_exists = mocks[0] self.assertIn(mock.call(old_head_file), delete_if_exists.call_args_list) self.assertIn(mock.call(old_heads_file), delete_if_exists.call_args_list) def test_get_project_base(self): config", "'upgrade', '--sql', 'head'], 'upgrade', [{'desc': None, 'revision': 'heads', 'sql': True}]", "cli._validate_revision(script_dir, fake_revision) validate_mock.assert_called_with( script_dir, fake_revision, label=branch) @mock.patch.object(cli, '_validate_single_revision_labels') def test__validate_revision_validates_branchless_migrations(", "kwarg['head'] = 'contract@head' self._main_test_helper( ['prog', 'revision', '-m', 'message', '--contract'], 'revision',", "language governing permissions and limitations # under the License. import", "service = '' class FakeRevision(object): path = 'fakepath' def __init__(self,", "License. You may obtain # a copy of the License", "ANY KIND, either express or implied. See the # License", "New Dream Network, LLC (DreamHost) # All Rights Reserved. #", "contract_head + '\\n')).mock_open mock_open_ex = self.useFixture( tools.OpenFixture(cli._get_expand_head_file_path( fake_config), expand_head +", "expand_head in heads: mock_open_ex.assert_called_with( cli._get_expand_head_file_path(fake_config)) mock_open_con.assert_called_with( cli._get_contract_head_file_path(fake_config)) if not head_files_not_exist:", "License. 
import copy import os import re import sys import", "self.configs[0]) @mock.patch('alembic.script.ScriptDirectory.walk_revisions') def test__get_branch_points(self, walk_mock): revisions = [FakeRevision(is_branch_point=tools.get_random_boolean) for i", "[FakeRevision() for i in range(10)] walk_mock.return_value = revisions cli.validate_revisions(self.configs[0]) validate_mock.assert_has_calls(", "be split into separate # expand/contract scripts alembic_ops.UpgradeOps( ops=[ alembic_ops.CreateTableOp(", "type_=None) op.drop_column('user', 'organization_name') ### end Alembic commands ###\"\"\") self.assertThat( alembic_ag_api.render_python_code(contract.upgrade_ops),", "project) module_name = project.replace('-', '_') + '.db.migration' attrs = ('alembic_migrations',)", "= set() self.branch_labels = labels self.down_revision = down_revision self.is_branch_point =", "self.assertTrue(migration.schema_has_column('foo', 'foo_column')) def test_schema_has_column_raises_if_offline(self): self.mock_alembic_is_offline.return_value = True self.assertRaises(RuntimeError, migration.schema_has_column, 'foo',", "test_upgrade_delta(self): self._main_test_helper( ['prog', 'upgrade', '--delta', '3'], 'upgrade', [{'desc': None, 'revision':", "the migration entrypoints map so it can be restored during", "or implied. See the # License for the specific language" ]
[ "floor = initial_amount_inflation_adjusted * self.floor ceiling = initial_amount_inflation_adjusted * self.ceiling", "= self.rate * self.portfolio.value initial_amount_inflation_adjusted = self.initial_amount * self.cumulative_inflation floor", "def start(self): amount = self.rate * self.portfolio.value self.initial_amount = amount", "ceiling=1.25): super().__init__(portfolio, harvest_strategy) self.floor = Decimal(floor) self.ceiling = Decimal(ceiling) self.rate", "initial_amount_inflation_adjusted * self.ceiling amount = max(amount, floor) amount = min(amount,", "self.ceiling = Decimal(ceiling) self.rate = Decimal(rate) def start(self): amount =", "portfolio, harvest_strategy, rate=.05, floor=.9, ceiling=1.25): super().__init__(portfolio, harvest_strategy) self.floor = Decimal(floor)", "floor=.9, ceiling=1.25): super().__init__(portfolio, harvest_strategy) self.floor = Decimal(floor) self.ceiling = Decimal(ceiling)", "def next(self): amount = self.rate * self.portfolio.value initial_amount_inflation_adjusted = self.initial_amount", "self.floor = Decimal(floor) self.ceiling = Decimal(ceiling) self.rate = Decimal(rate) def", "* self.portfolio.value initial_amount_inflation_adjusted = self.initial_amount * self.cumulative_inflation floor = initial_amount_inflation_adjusted", "super().__init__(portfolio, harvest_strategy) self.floor = Decimal(floor) self.ceiling = Decimal(ceiling) self.rate =", "harvest_strategy, rate=.05, floor=.9, ceiling=1.25): super().__init__(portfolio, harvest_strategy) self.floor = Decimal(floor) self.ceiling", "= self.initial_amount * self.cumulative_inflation floor = initial_amount_inflation_adjusted * self.floor ceiling", "initial_amount_inflation_adjusted * self.floor ceiling = initial_amount_inflation_adjusted * self.ceiling amount =", "start(self): amount = self.rate * self.portfolio.value self.initial_amount = amount return", "* self.cumulative_inflation floor = initial_amount_inflation_adjusted * self.floor ceiling = initial_amount_inflation_adjusted", "Floor-to-Ceiling, as described in McClung's Living Off Your Money class", "__init__(self, portfolio, harvest_strategy, rate=.05, floor=.9, ceiling=1.25): super().__init__(portfolio, harvest_strategy) self.floor =", "Decimal(rate) def start(self): amount = self.rate * self.portfolio.value self.initial_amount =", "self.cumulative_inflation floor = initial_amount_inflation_adjusted * self.floor ceiling = initial_amount_inflation_adjusted *", "from .abc import WithdrawalStrategy # Bengen's Floor-to-Ceiling, as described in", "from decimal import Decimal from .abc import WithdrawalStrategy # Bengen's", "described in McClung's Living Off Your Money class FloorCeiling(WithdrawalStrategy): def", "WithdrawalStrategy # Bengen's Floor-to-Ceiling, as described in McClung's Living Off", "def __init__(self, portfolio, harvest_strategy, rate=.05, floor=.9, ceiling=1.25): super().__init__(portfolio, harvest_strategy) self.floor", "as described in McClung's Living Off Your Money class FloorCeiling(WithdrawalStrategy):", "rate=.05, floor=.9, ceiling=1.25): super().__init__(portfolio, harvest_strategy) self.floor = Decimal(floor) self.ceiling =", "Decimal(ceiling) self.rate = Decimal(rate) def start(self): amount = self.rate *", "self.portfolio.value initial_amount_inflation_adjusted = self.initial_amount * self.cumulative_inflation floor = initial_amount_inflation_adjusted *", "Decimal from .abc import WithdrawalStrategy # Bengen's Floor-to-Ceiling, as described", "harvest_strategy) self.floor = Decimal(floor) self.ceiling = 
Decimal(ceiling) self.rate = Decimal(rate)", "amount def next(self): amount = self.rate * self.portfolio.value initial_amount_inflation_adjusted =", "= amount return amount def next(self): amount = self.rate *", "<filename>withdrawal/floor_ceiling.py<gh_stars>10-100 from decimal import Decimal from .abc import WithdrawalStrategy #", "= self.rate * self.portfolio.value self.initial_amount = amount return amount def", "= initial_amount_inflation_adjusted * self.ceiling amount = max(amount, floor) amount =", "Bengen's Floor-to-Ceiling, as described in McClung's Living Off Your Money", "amount = max(amount, floor) amount = min(amount, ceiling) return amount", "* self.floor ceiling = initial_amount_inflation_adjusted * self.ceiling amount = max(amount,", "next(self): amount = self.rate * self.portfolio.value initial_amount_inflation_adjusted = self.initial_amount *", "self.ceiling amount = max(amount, floor) amount = min(amount, ceiling) return", "= Decimal(rate) def start(self): amount = self.rate * self.portfolio.value self.initial_amount", "import WithdrawalStrategy # Bengen's Floor-to-Ceiling, as described in McClung's Living", "self.rate * self.portfolio.value self.initial_amount = amount return amount def next(self):", ".abc import WithdrawalStrategy # Bengen's Floor-to-Ceiling, as described in McClung's", "Your Money class FloorCeiling(WithdrawalStrategy): def __init__(self, portfolio, harvest_strategy, rate=.05, floor=.9,", "self.rate = Decimal(rate) def start(self): amount = self.rate * self.portfolio.value", "amount return amount def next(self): amount = self.rate * self.portfolio.value", "initial_amount_inflation_adjusted = self.initial_amount * self.cumulative_inflation floor = initial_amount_inflation_adjusted * self.floor", "Living Off Your Money class FloorCeiling(WithdrawalStrategy): def __init__(self, portfolio, harvest_strategy,", "class FloorCeiling(WithdrawalStrategy): def __init__(self, portfolio, harvest_strategy, rate=.05, floor=.9, ceiling=1.25): super().__init__(portfolio,", "FloorCeiling(WithdrawalStrategy): def __init__(self, portfolio, harvest_strategy, rate=.05, floor=.9, ceiling=1.25): super().__init__(portfolio, harvest_strategy)", "import Decimal from .abc import WithdrawalStrategy # Bengen's Floor-to-Ceiling, as", "self.rate * self.portfolio.value initial_amount_inflation_adjusted = self.initial_amount * self.cumulative_inflation floor =", "Off Your Money class FloorCeiling(WithdrawalStrategy): def __init__(self, portfolio, harvest_strategy, rate=.05,", "amount = self.rate * self.portfolio.value self.initial_amount = amount return amount", "ceiling = initial_amount_inflation_adjusted * self.ceiling amount = max(amount, floor) amount", "= Decimal(ceiling) self.rate = Decimal(rate) def start(self): amount = self.rate", "self.initial_amount * self.cumulative_inflation floor = initial_amount_inflation_adjusted * self.floor ceiling =", "Decimal(floor) self.ceiling = Decimal(ceiling) self.rate = Decimal(rate) def start(self): amount", "return amount def next(self): amount = self.rate * self.portfolio.value initial_amount_inflation_adjusted", "* self.portfolio.value self.initial_amount = amount return amount def next(self): amount", "# Bengen's Floor-to-Ceiling, as described in McClung's Living Off Your", "self.initial_amount = amount return amount def next(self): amount = self.rate", "McClung's Living Off Your Money class FloorCeiling(WithdrawalStrategy): def __init__(self, portfolio,", "amount = self.rate * self.portfolio.value initial_amount_inflation_adjusted = 
self.initial_amount * self.cumulative_inflation", "= Decimal(floor) self.ceiling = Decimal(ceiling) self.rate = Decimal(rate) def start(self):", "in McClung's Living Off Your Money class FloorCeiling(WithdrawalStrategy): def __init__(self,", "self.floor ceiling = initial_amount_inflation_adjusted * self.ceiling amount = max(amount, floor)", "= initial_amount_inflation_adjusted * self.floor ceiling = initial_amount_inflation_adjusted * self.ceiling amount", "* self.ceiling amount = max(amount, floor) amount = min(amount, ceiling)", "Money class FloorCeiling(WithdrawalStrategy): def __init__(self, portfolio, harvest_strategy, rate=.05, floor=.9, ceiling=1.25):", "decimal import Decimal from .abc import WithdrawalStrategy # Bengen's Floor-to-Ceiling,", "self.portfolio.value self.initial_amount = amount return amount def next(self): amount =" ]
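A minimal, self-contained sketch of the clamping rule above, assuming nothing beyond the standard library; the function name and the sample numbers are illustrative and not part of the withdrawal package.

from decimal import Decimal


def clamp_withdrawal(rate_amount, initial_amount, cumulative_inflation,
                     floor=Decimal('0.9'), ceiling=Decimal('1.25')):
    # Same arithmetic as FloorCeiling.next(): clamp the rate-based amount
    # between floor/ceiling multiples of the inflation-adjusted first withdrawal.
    base = initial_amount * cumulative_inflation
    return min(max(rate_amount, base * floor), base * ceiling)


# 5% of a fallen portfolio would give 35,000, but the floor (90% of the
# inflation-adjusted 50,000 first withdrawal, i.e. 46,350) holds the amount up.
print(clamp_withdrawal(Decimal('35000'), Decimal('50000'), Decimal('1.03')))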
[ "dig_e1_e7 = self.i2c.readfrom_mem(self.address, 0xE1, 7) self.dig_T1, self.dig_T2, self.dig_T3, self.dig_P1, \\", "h h = 419430400 if h > 419430400 else h", "self._l8_barray raw_press = ((readout[0] << 16) | (readout[1] << 8)", "0xF2 BME280_REGISTER_CONTROL = 0xF4 BME280_REGISTER_CONTROL_IIR = 0xF5 FILTER_OFF = 0", "= (raw_temp >> 4) - self.dig_T1 var2 = var2 *", "(self.dig_P7 << 4) h = self.t_fine - 76800 h =", "humidity_mode=OSAMPLE_1, temperature_scale=CELSIUS, iir=FILTER_16, address=BME280_I2CADDR, i2c=None): osamples = [ OSAMPLE_0, OSAMPLE_1,", "* self.dig_H3) >> 11) + 32768)) >> 10) + 2097152)", "11) var2 = (raw_temp >> 4) - self.dig_T1 var2 =", "2 OSAMPLE_4 = 3 OSAMPLE_8 = 4 OSAMPLE_16 = 5", "\\ self.dig_P6, self.dig_P7, self.dig_P8, self.dig_P9, \\ _, self.dig_H1 = unpack(\"<HhhHhhhhhhhhBB\",", "// var1 var1 = (self.dig_P9 * (p >> 13) *", "\"+self.temperature_scale return (temp.format(t), \"{} Pa\".format(p), \"{} %\".format(h)) @property def temperature(self):", "humidity(self): _, _, h = self.values return h def altitude(self,", "| (dig_e1_e7[4] & 0xF) e6_sign = unpack_from(\"<b\", dig_e1_e7, 5)[0] self.dig_H5", "6)[0] self.i2c.writeto_mem( self.address, BME280_REGISTER_CONTROL, bytearray([0x24])) time.sleep(0.002) self.t_fine = 0 self._l1_barray", ">> 11) var2 = (raw_temp >> 4) - self.dig_T1 var2", "pd = (p % 256)/256 return (pi, pd) @property def", "self.iir << 2 self.i2c.writeto_mem( self.address, BME280_REGISTER_CONTROL_IIR, self._l1_barray) time.sleep(0.002) self._l1_barray[0] =", "self.i2c.writeto_mem( self.address, BME280_REGISTER_CONTROL, self._l1_barray) osamples_1_16 = [ OSAMPLE_1, OSAMPLE_2, OSAMPLE_4,", "25 var2 = (self.dig_P8 * p) >> 19 pressure =", "@property def humidity(self): _, _, h = self.values return h", "<< 12)) var1 = (((1 << 47) + var1) *", "8) | readout[5]) >> 4 raw_hum = (readout[6] << 8)", "* self.dig_T3) >> 14 self.t_fine = var1 + var2 temp", ">> 25 var2 = (self.dig_P8 * p) >> 19 pressure", "<reponame>rcolistete/MicroPython_MiniCurso_ProjOrientado \"\"\" MicroPython driver for Bosh BME280 temperature, pressure and", "while (unpack('<H', self.i2c.readfrom_mem( self.address, BME280_REGISTER_STATUS, 2))[0] & 0x08): time.sleep(0.001) self.i2c.readfrom_mem_into(self.address,", ">> 19 pressure = ((p + var1 + var2) >>", "= (e6_sign << 4) | (dig_e1_e7[4] >> 4) self.dig_H6 =", "License (https://opensource.org/licenses/MIT) \"\"\" import time from ustruct import unpack, unpack_from", "var2) >> 8) + (self.dig_P7 << 4) h = self.t_fine", "is required.') self.i2c = i2c dig_88_a1 = self.i2c.readfrom_mem(self.address, 0x88, 26)", "if self.temperature_scale == 'F': temp = 32 + (temp*1.8) elif", "osamples_1_16 = [ OSAMPLE_1, OSAMPLE_2, OSAMPLE_4, OSAMPLE_8, OSAMPLE_16] sleep_time =", "self.dig_P7, self.dig_P8, self.dig_P9, \\ _, self.dig_H1 = unpack(\"<HhhHhhhhhhhhBB\", dig_88_a1) self.dig_H2,", "temp = 32 + (temp*1.8) elif self.temperature_scale == 'K': temp", "0xF3 BME280_REGISTER_CONTROL_HUM = 0xF2 BME280_REGISTER_CONTROL = 0xF4 BME280_REGISTER_CONTROL_IIR = 0xF5", "[ OSAMPLE_0, OSAMPLE_1, OSAMPLE_2, OSAMPLE_4, OSAMPLE_8, OSAMPLE_16] msg_error = 'Unexpected", "result[2] = humidity return result return array(\"i\", (temp, pressure, humidity))", "<< 16) | (readout[1] << 8) | readout[2]) >> 4", "self._l1_barray) time.sleep(0.002) self._l1_barray[0] = self.humidity_mode self.i2c.writeto_mem( self.address, BME280_REGISTER_CONTROL_HUM, self._l1_barray) def", "array(\"i\", (temp, pressure, humidity)) @property def values(self): temp, pres, humi", "h = (((((raw_hum << 14) 
- (self.dig_H4 << 20) -", "h - (((((h >> 15) * (h >> 15)) >>", "return array(\"i\", (temp, pressure, humidity)) @property def values(self): temp, pres,", "(((h * self.dig_H3) >> 11) + 32768)) >> 10) +", "unpack_from(\"<b\", dig_e1_e7, 6)[0] self.i2c.writeto_mem( self.address, BME280_REGISTER_CONTROL, bytearray([0x24])) time.sleep(0.002) self.t_fine =", "0xE1, 7) self.dig_T1, self.dig_T2, self.dig_T3, self.dig_P1, \\ self.dig_P2, self.dig_P3, self.dig_P4,", "BME280_REGISTER_CONTROL_HUM, self._l1_barray) def read_raw_data(self, result): self._l1_barray[0] = ( self.pressure_mode <<", "not in [FILTER_OFF, FILTER_2, FILTER_4, FILTER_8, FILTER_16]: raise ValueError(msg_error.format(iir)) self.iir", "= address if i2c is None: raise ValueError('An I2C object", "<< 17) var2 = var2 + (self.dig_P4 << 35) var1", "(https://opensource.org/licenses/MIT) \"\"\" import time from ustruct import unpack, unpack_from from", "def values(self): temp, pres, humi = self.read_compensated_data() temp = temp/100", "= [ OSAMPLE_0, OSAMPLE_1, OSAMPLE_2, OSAMPLE_4, OSAMPLE_8, OSAMPLE_16] msg_error =", "4 CELSIUS = 'C' FAHRENHEIT = 'F' KELVIN = 'K'", "ustruct import unpack, unpack_from from array import array # BME280", "0xF) e6_sign = unpack_from(\"<b\", dig_e1_e7, 5)[0] self.dig_H5 = (e6_sign <<", "var1 = ((raw_temp >> 3) - (self.dig_T1 << 1)) *", "+ var2) >> 8) + (self.dig_P7 << 4) h =", "'Unexpected low pass IIR filter setting value {0}.' if iir", "elif self.temperature_scale == 'K': temp = temp + 273.15 pres", "BME280_REGISTER_CONTROL_IIR, self._l1_barray) time.sleep(0.002) self._l1_barray[0] = self.humidity_mode self.i2c.writeto_mem( self.address, BME280_REGISTER_CONTROL_HUM, self._l1_barray)", "osamples_1_16: sleep_time += 575 + (2300*(1 << self.pressure_mode)) if self.humidity_mode", "= temp result[1] = pressure result[2] = humidity return result", "& 0x08): time.sleep(0.001) self.i2c.readfrom_mem_into(self.address, 0xF7, self._l8_barray) readout = self._l8_barray raw_press", "11) + 32768)) >> 10) + 2097152) * self.dig_H2 +", "var2 + ((var1 * self.dig_P5) << 17) var2 = var2", "= 'F' KELVIN = 'K' class BME280(object): def __init__(self, temperature_mode=OSAMPLE_2,", "+ 2097152) * self.dig_H2 + 8192) >> 14)) h =", "(p % 256)/256 return (pi, pd) @property def humidity(self): _,", "pressure_mode if humidity_mode not in osamples: raise ValueError(msg_error.format(\"humidity\", humidity_mode)) self.humidity_mode", "readout = self._l8_barray raw_press = ((readout[0] << 16) | (readout[1]", "= 1 OSAMPLE_2 = 2 OSAMPLE_4 = 3 OSAMPLE_8 =", "* self.dig_P3) >> 8) + ((var1 * self.dig_P2) << 12))", "0 else: p = 1048576 - raw_press p = (((p", "19 pressure = ((p + var1 + var2) >> 8)", "ValueError(msg_error.format(\"pressure\", pressure_mode)) self.pressure_mode = pressure_mode if humidity_mode not in osamples:", "dig_e1_e7, 5)[0] self.dig_H5 = (e6_sign << 4) | (dig_e1_e7[4] >>", ">> 13)) >> 25 var2 = (self.dig_P8 * p) >>", "p, _ = self.values return p @property def pressure_precision(self): _,", "var1 var1 = (self.dig_P9 * (p >> 13) * (p", "self.i2c.writeto_mem( self.address, BME280_REGISTER_CONTROL_HUM, self._l1_barray) def read_raw_data(self, result): self._l1_barray[0] = (", "def formated_values(self): t, p, h = self.values temp = \"{}", ">> 3) - (self.dig_T1 << 1)) * (self.dig_T2 >> 11)", "7) * self.dig_H1) >> 4) h = 0 if h", "self.dig_H1) >> 4) h = 0 if h < 0", "var2 temp = (self.t_fine * 5 + 128) >> 8", "self.pressure_mode << 5 | self.temperature_mode << 2 | 1) self.i2c.writeto_mem(", "33 if var1 == 0: pressure = 0 else: p", 
"pressure = 0 else: p = 1048576 - raw_press p", "pres = pres/256 humi = humi/1024 return (temp, pres, humi)", "pres/256 humi = humi/1024 return (temp, pres, humi) @property def", "var2 + (self.dig_P4 << 35) var1 = (((var1 * var1", "% 256)/256 return (pi, pd) @property def humidity(self): _, _,", "low pass IIR filter setting value {0}.' if iir not", "self.address, BME280_REGISTER_CONTROL, self._l1_barray) osamples_1_16 = [ OSAMPLE_1, OSAMPLE_2, OSAMPLE_4, OSAMPLE_8,", "(self.dig_T1 << 1)) * (self.dig_T2 >> 11) var2 = (raw_temp", "var1 == 0: pressure = 0 else: p = 1048576", "_, _, h = self.values return h def altitude(self, pressure_sea_level=1013.25):", "self.dig_P8, self.dig_P9, \\ _, self.dig_H1 = unpack(\"<HhhHhhhhhhhhBB\", dig_88_a1) self.dig_H2, self.dig_H3", "= 1 FILTER_4 = 2 FILTER_8 = 3 FILTER_16 =", "self.dig_T1 var2 = var2 * ((raw_temp >> 4) - self.dig_T1)", "* self.dig_P5) << 17) var2 = var2 + (self.dig_P4 <<", "OSAMPLE_1 = 1 OSAMPLE_2 = 2 OSAMPLE_4 = 3 OSAMPLE_8", "self.address, BME280_REGISTER_CONTROL_HUM, self._l1_barray) def read_raw_data(self, result): self._l1_barray[0] = ( self.pressure_mode", "pass IIR filter setting value {0}.' if iir not in", "osamples_1_16: sleep_time += 2300*(1 << self.temperature_mode) if self.pressure_mode in osamples_1_16:", "del msg_error self.address = address if i2c is None: raise", "import time from ustruct import unpack, unpack_from from array import", "13)) >> 25 var2 = (self.dig_P8 * p) >> 19", "\"{} Pa\".format(p), \"{} %\".format(h)) @property def temperature(self): t, _, _", "Get raw data and compensa the same \"\"\" self.read_raw_data(self._l3_resultarray) raw_temp,", "{0}.' if iir not in [FILTER_OFF, FILTER_2, FILTER_4, FILTER_8, FILTER_16]:", "self.temperature_scale == 'F': temp = 32 + (temp*1.8) elif self.temperature_scale", "var2 = var2 + (self.dig_P4 << 35) var1 = (((var1", "= var2 + ((var1 * self.dig_P5) << 17) var2 =", "BME280_REGISTER_STATUS = 0xF3 BME280_REGISTER_CONTROL_HUM = 0xF2 BME280_REGISTER_CONTROL = 0xF4 BME280_REGISTER_CONTROL_IIR", "humidity_mode not in osamples: raise ValueError(msg_error.format(\"humidity\", humidity_mode)) self.humidity_mode = humidity_mode", "= temp/100 if self.temperature_scale == 'F': temp = 32 +", "iir not in [FILTER_OFF, FILTER_2, FILTER_4, FILTER_8, FILTER_16]: raise ValueError(msg_error.format(iir))", "msg_error self.address = address if i2c is None: raise ValueError('An", "self.humidity_mode self.i2c.writeto_mem( self.address, BME280_REGISTER_CONTROL_HUM, self._l1_barray) def read_raw_data(self, result): self._l1_barray[0] =", "None: raise ValueError('An I2C object is required.') self.i2c = i2c", "array import array # BME280 default address BME280_I2CADDR = 0x76", "osamples: raise ValueError(msg_error.format(\"pressure\", pressure_mode)) self.pressure_mode = pressure_mode if humidity_mode not", "(unpack('<H', self.i2c.readfrom_mem( self.address, BME280_REGISTER_STATUS, 2))[0] & 0x08): time.sleep(0.001) self.i2c.readfrom_mem_into(self.address, 0xF7,", "= 4 CELSIUS = 'C' FAHRENHEIT = 'F' KELVIN =", "<< 8) | readout[5]) >> 4 raw_hum = (readout[6] <<", "> 419430400 else h humidity = h >> 12 if", "@property def values(self): temp, pres, humi = self.read_compensated_data() temp =", "pressure_mode)) self.pressure_mode = pressure_mode if humidity_mode not in osamples: raise", "if self.pressure_mode in osamples_1_16: sleep_time += 575 + (2300*(1 <<", "8) + ((var1 * self.dig_P2) << 12)) var1 = (((1", "{0}.' 
if temperature_mode not in osamples: raise ValueError(msg_error.format(\"temperature\", temperature_mode)) self.temperature_mode", "raw_press result[2] = raw_hum def read_compensated_data(self, result=None): \"\"\" Get raw", "_, h = self.values return h def altitude(self, pressure_sea_level=1013.25): pi,", "= 2 FILTER_8 = 3 FILTER_16 = 4 CELSIUS =", "Bosh BME280 temperature, pressure and humidity I2C sensor: https://www.bosch-sensortec.com/bst/products/all_products/bme280 Authors:", "47) + var1) * self.dig_P1) >> 33 if var1 ==", "1) self.i2c.writeto_mem( self.address, BME280_REGISTER_CONTROL, self._l1_barray) osamples_1_16 = [ OSAMPLE_1, OSAMPLE_2,", "self.i2c.readfrom_mem(self.address, 0xE1, 7) self.dig_T1, self.dig_T2, self.dig_T3, self.dig_P1, \\ self.dig_P2, self.dig_P3,", "dig_88_a1 = self.i2c.readfrom_mem(self.address, 0x88, 26) dig_e1_e7 = self.i2c.readfrom_mem(self.address, 0xE1, 7)", "OSAMPLE_4, OSAMPLE_8, OSAMPLE_16] msg_error = 'Unexpected {} operating mode value", ">> 15)) >> 7) * self.dig_H1) >> 4) h =", "<< 8) | readout[2]) >> 4 raw_temp = ((readout[3] <<", "self.dig_P5, \\ self.dig_P6, self.dig_P7, self.dig_P8, self.dig_P9, \\ _, self.dig_H1 =", "pressure_mode=OSAMPLE_16, humidity_mode=OSAMPLE_1, temperature_scale=CELSIUS, iir=FILTER_16, address=BME280_I2CADDR, i2c=None): osamples = [ OSAMPLE_0,", "_, self.dig_H1 = unpack(\"<HhhHhhhhhhhhBB\", dig_88_a1) self.dig_H2, self.dig_H3 = unpack(\"<hB\", dig_e1_e7)", "result[1] = raw_press result[2] = raw_hum def read_compensated_data(self, result=None): \"\"\"", "= unpack_from(\"<b\", dig_e1_e7, 3)[0] self.dig_H4 = (e4_sign << 4) |", "scale value {0}.' if temperature_scale not in [CELSIUS, FAHRENHEIT, KELVIN]:", "result): self._l1_barray[0] = ( self.pressure_mode << 5 | self.temperature_mode <<", "osamples: raise ValueError(msg_error.format(\"temperature\", temperature_mode)) self.temperature_mode = temperature_mode if pressure_mode not", "((var2 >> 12) * self.dig_T3) >> 14 self.t_fine = var1", "= ( self.pressure_mode << 5 | self.temperature_mode << 2 |", "= self.read_compensated_data() pi = float(p // 256) pd = (p", "2))[0] & 0x08): time.sleep(0.001) self.i2c.readfrom_mem_into(self.address, 0xF7, self._l8_barray) readout = self._l8_barray", "self.humidity_mode = humidity_mode msg_error = 'Unexpected low pass IIR filter", "var1 = (self.dig_P9 * (p >> 13) * (p >>", "= self.values return p @property def pressure_precision(self): _, p, _", "float(p // 256) pd = (p % 256)/256 return (pi,", "BME280_I2CADDR = 0x77 OSAMPLE_0 = 0 OSAMPLE_1 = 1 OSAMPLE_2", "2 FILTER_8 = 3 FILTER_16 = 4 CELSIUS = 'C'", "- self.dig_T1) var2 = ((var2 >> 12) * self.dig_T3) >>", "self.temperature_scale = temperature_scale del msg_error self.address = address if i2c", "%\".format(h)) @property def temperature(self): t, _, _ = self.values return", "\"\"\" self.read_raw_data(self._l3_resultarray) raw_temp, raw_press, raw_hum = self._l3_resultarray var1 = ((raw_temp", "raw_temp, raw_press, raw_hum = self._l3_resultarray var1 = ((raw_temp >> 3)", "= 5 BME280_REGISTER_STATUS = 0xF3 BME280_REGISTER_CONTROL_HUM = 0xF2 BME280_REGISTER_CONTROL =", "2097152) * self.dig_H2 + 8192) >> 14)) h = h", "self._l1_barray = bytearray(1) self._l8_barray = bytearray(8) self._l3_resultarray = array(\"i\", [0,", "12 if result: result[0] = temp result[1] = pressure result[2]", "ValueError('An I2C object is required.') self.i2c = i2c dig_88_a1 =", "(self.dig_H4 << 20) - (self.dig_H5 * h)) + 16384) >>", "self.dig_H6) >> 10) * (((h * self.dig_H3) >> 11) +", "if i2c is None: raise 
ValueError('An I2C object is required.')", "time.sleep_us(sleep_time) while (unpack('<H', self.i2c.readfrom_mem( self.address, BME280_REGISTER_STATUS, 2))[0] & 0x08): time.sleep(0.001)", "= ((readout[0] << 16) | (readout[1] << 8) | readout[2])", "if temperature_scale not in [CELSIUS, FAHRENHEIT, KELVIN]: raise ValueError(msg_error.format(temperature_scale)) self.temperature_scale", "self.dig_T3, self.dig_P1, \\ self.dig_P2, self.dig_P3, self.dig_P4, self.dig_P5, \\ self.dig_P6, self.dig_P7,", "sleep_time += 575 + (2300*(1 << self.pressure_mode)) if self.humidity_mode in", "self.temperature_mode = temperature_mode if pressure_mode not in osamples: raise ValueError(msg_error.format(\"pressure\",", "3.1.2 @ 2018/04 License: MIT License (https://opensource.org/licenses/MIT) \"\"\" import time", "= 0xF2 BME280_REGISTER_CONTROL = 0xF4 BME280_REGISTER_CONTROL_IIR = 0xF5 FILTER_OFF =", "= pressure_mode if humidity_mode not in osamples: raise ValueError(msg_error.format(\"humidity\", humidity_mode))", "raw_temp result[1] = raw_press result[2] = raw_hum def read_compensated_data(self, result=None):", "\\ self.dig_P2, self.dig_P3, self.dig_P4, self.dig_P5, \\ self.dig_P6, self.dig_P7, self.dig_P8, self.dig_P9,", "= [ OSAMPLE_1, OSAMPLE_2, OSAMPLE_4, OSAMPLE_8, OSAMPLE_16] sleep_time = 1250", "{0}.' if temperature_scale not in [CELSIUS, FAHRENHEIT, KELVIN]: raise ValueError(msg_error.format(temperature_scale))", "1 OSAMPLE_2 = 2 OSAMPLE_4 = 3 OSAMPLE_8 = 4", "\"\"\" MicroPython driver for Bosh BME280 temperature, pressure and humidity", "= humidity_mode msg_error = 'Unexpected low pass IIR filter setting", "BME280 temperature, pressure and humidity I2C sensor: https://www.bosch-sensortec.com/bst/products/all_products/bme280 Authors: <NAME>,", "temperature_mode not in osamples: raise ValueError(msg_error.format(\"temperature\", temperature_mode)) self.temperature_mode = temperature_mode", "= 'K' class BME280(object): def __init__(self, temperature_mode=OSAMPLE_2, pressure_mode=OSAMPLE_16, humidity_mode=OSAMPLE_1, temperature_scale=CELSIUS,", "13) * (p >> 13)) >> 25 var2 = (self.dig_P8", "3) - (self.dig_T1 << 1)) * (self.dig_T2 >> 11) var2", "((p + var1 + var2) >> 8) + (self.dig_P7 <<", "__init__(self, temperature_mode=OSAMPLE_2, pressure_mode=OSAMPLE_16, humidity_mode=OSAMPLE_1, temperature_scale=CELSIUS, iir=FILTER_16, address=BME280_I2CADDR, i2c=None): osamples =", "< 0 else h h = 419430400 if h >", "= 0x76 # BME280_I2CADDR = 0x77 OSAMPLE_0 = 0 OSAMPLE_1", "def pressure_precision(self): _, p, _ = self.read_compensated_data() pi = float(p", "5 BME280_REGISTER_STATUS = 0xF3 BME280_REGISTER_CONTROL_HUM = 0xF2 BME280_REGISTER_CONTROL = 0xF4", "i2c is None: raise ValueError('An I2C object is required.') self.i2c", "humi = self.read_compensated_data() temp = temp/100 if self.temperature_scale == 'F':", "import array # BME280 default address BME280_I2CADDR = 0x76 #", "= 3 OSAMPLE_8 = 4 OSAMPLE_16 = 5 BME280_REGISTER_STATUS =", "+ 8192) >> 14)) h = h - (((((h >>", "(2300*(1 << self.pressure_mode)) if self.humidity_mode in osamples_1_16: sleep_time += 575", "required.') self.i2c = i2c dig_88_a1 = self.i2c.readfrom_mem(self.address, 0x88, 26) dig_e1_e7", ">> 15) * (((((((h * self.dig_H6) >> 10) * (((h", "@property def temperature(self): t, _, _ = self.values return t", ">> 4) self.dig_H6 = unpack_from(\"<b\", dig_e1_e7, 6)[0] self.i2c.writeto_mem( self.address, BME280_REGISTER_CONTROL,", "self.t_fine - 128000 var2 = var1 * var1 * self.dig_P6", "sleep_time += 575 + (2300*(1 << self.humidity_mode)) 
time.sleep_us(sleep_time) while (unpack('<H',", "if h > 419430400 else h humidity = h >>", "h = 0 if h < 0 else h h", "(self.dig_H5 * h)) + 16384) >> 15) * (((((((h *", "var1) * self.dig_P1) >> 33 if var1 == 0: pressure", "BME280_I2CADDR = 0x76 # BME280_I2CADDR = 0x77 OSAMPLE_0 = 0", "temp result[1] = pressure result[2] = humidity return result return", "* self.dig_H1) >> 4) h = 0 if h <", "FAHRENHEIT = 'F' KELVIN = 'K' class BME280(object): def __init__(self,", "time.sleep(0.002) self.t_fine = 0 self._l1_barray = bytearray(1) self._l8_barray = bytearray(8)", "= var2 + (self.dig_P4 << 35) var1 = (((var1 *", "if h < 0 else h h = 419430400 if", "p @property def pressure_precision(self): _, p, _ = self.read_compensated_data() pi", "self.i2c.writeto_mem( self.address, BME280_REGISTER_CONTROL_IIR, self._l1_barray) time.sleep(0.002) self._l1_barray[0] = self.humidity_mode self.i2c.writeto_mem( self.address,", "self.dig_H4 = (e4_sign << 4) | (dig_e1_e7[4] & 0xF) e6_sign", "OSAMPLE_4 = 3 OSAMPLE_8 = 4 OSAMPLE_16 = 5 BME280_REGISTER_STATUS", "self.i2c.readfrom_mem(self.address, 0x88, 26) dig_e1_e7 = self.i2c.readfrom_mem(self.address, 0xE1, 7) self.dig_T1, self.dig_T2,", "temperature_scale del msg_error self.address = address if i2c is None:", "self.address, BME280_REGISTER_STATUS, 2))[0] & 0x08): time.sleep(0.001) self.i2c.readfrom_mem_into(self.address, 0xF7, self._l8_barray) readout", "raise ValueError(msg_error.format(iir)) self.iir = iir msg_error = 'Unexpected temperature scale", "IIR filter setting value {0}.' if iir not in [FILTER_OFF,", "self.address, BME280_REGISTER_CONTROL, bytearray([0x24])) time.sleep(0.002) self.t_fine = 0 self._l1_barray = bytearray(1)", "\"\"\" Get raw data and compensa the same \"\"\" self.read_raw_data(self._l3_resultarray)", "= 0x77 OSAMPLE_0 = 0 OSAMPLE_1 = 1 OSAMPLE_2 =", "((readout[3] << 16) | (readout[4] << 8) | readout[5]) >>", "if result: result[0] = temp result[1] = pressure result[2] =", "ValueError(msg_error.format(temperature_scale)) self.temperature_scale = temperature_scale del msg_error self.address = address if", "8) | readout[7] result[0] = raw_temp result[1] = raw_press result[2]", "= 0 self._l1_barray = bytearray(1) self._l8_barray = bytearray(8) self._l3_resultarray =", "= iir msg_error = 'Unexpected temperature scale value {0}.' 
if", "https://www.bosch-sensortec.com/bst/products/all_products/bme280 Authors: <NAME>, <NAME> Version: 3.1.2 @ 2018/04 License: MIT", "self.dig_T3) >> 14 self.t_fine = var1 + var2 temp =", "0 OSAMPLE_1 = 1 OSAMPLE_2 = 2 OSAMPLE_4 = 3", "License: MIT License (https://opensource.org/licenses/MIT) \"\"\" import time from ustruct import", "self._l1_barray[0] = ( self.pressure_mode << 5 | self.temperature_mode << 2", "(self.dig_P8 * p) >> 19 pressure = ((p + var1", "OSAMPLE_0 = 0 OSAMPLE_1 = 1 OSAMPLE_2 = 2 OSAMPLE_4", "humi = humi/1024 return (temp, pres, humi) @property def formated_values(self):", "osamples: raise ValueError(msg_error.format(\"humidity\", humidity_mode)) self.humidity_mode = humidity_mode msg_error = 'Unexpected", "self.pressure_mode)) if self.humidity_mode in osamples_1_16: sleep_time += 575 + (2300*(1", "var2 = var2 + ((var1 * self.dig_P5) << 17) var2", "result[1] = pressure result[2] = humidity return result return array(\"i\",", "(e4_sign << 4) | (dig_e1_e7[4] & 0xF) e6_sign = unpack_from(\"<b\",", "in osamples: raise ValueError(msg_error.format(\"temperature\", temperature_mode)) self.temperature_mode = temperature_mode if pressure_mode", "// 256) pd = (p % 256)/256 return (pi, pd)", "self.address = address if i2c is None: raise ValueError('An I2C", "+ (self.dig_P4 << 35) var1 = (((var1 * var1 *", "((var1 * self.dig_P2) << 12)) var1 = (((1 << 47)", "address if i2c is None: raise ValueError('An I2C object is", "@property def pressure_precision(self): _, p, _ = self.read_compensated_data() pi =", "+ (2300*(1 << self.humidity_mode)) time.sleep_us(sleep_time) while (unpack('<H', self.i2c.readfrom_mem( self.address, BME280_REGISTER_STATUS,", "h def altitude(self, pressure_sea_level=1013.25): pi, pd = self.pressure_precision() return 44330*(1-((float(pi+pd)/100)/pressure_sea_level)**(1/5.255))", "return (pi, pd) @property def humidity(self): _, _, h =", "in [FILTER_OFF, FILTER_2, FILTER_4, FILTER_8, FILTER_16]: raise ValueError(msg_error.format(iir)) self.iir =", "1 FILTER_4 = 2 FILTER_8 = 3 FILTER_16 = 4", "1)) * (self.dig_T2 >> 11) var2 = (raw_temp >> 4)", ">> 12 if result: result[0] = temp result[1] = pressure", "var2 = ((var2 >> 12) * self.dig_T3) >> 14 self.t_fine", "10) + 2097152) * self.dig_H2 + 8192) >> 14)) h", "0 else h h = 419430400 if h > 419430400", "{} operating mode value {0}.' 
"""
MicroPython driver for the Bosch BME280 temperature, pressure and humidity
I2C sensor: https://www.bosch-sensortec.com/bst/products/all_products/bme280

Authors: <NAME>, <NAME>
Version: 3.1.2 @ 2018/04
License: MIT License (https://opensource.org/licenses/MIT)
"""
import time
from ustruct import unpack, unpack_from
from array import array

# BME280 default address
BME280_I2CADDR = 0x76
# BME280_I2CADDR = 0x77

# Oversampling modes
OSAMPLE_0 = 0
OSAMPLE_1 = 1
OSAMPLE_2 = 2
OSAMPLE_4 = 3
OSAMPLE_8 = 4
OSAMPLE_16 = 5

BME280_REGISTER_STATUS = 0xF3
BME280_REGISTER_CONTROL_HUM = 0xF2
BME280_REGISTER_CONTROL = 0xF4
BME280_REGISTER_CONTROL_IIR = 0xF5

# Low pass IIR filter settings
FILTER_OFF = 0
FILTER_2 = 1
FILTER_4 = 2
FILTER_8 = 3
FILTER_16 = 4

# Temperature scales
CELSIUS = 'C'
FAHRENHEIT = 'F'
KELVIN = 'K'


class BME280(object):

    def __init__(self,
                 temperature_mode=OSAMPLE_2,
                 pressure_mode=OSAMPLE_16,
                 humidity_mode=OSAMPLE_1,
                 temperature_scale=CELSIUS,
                 iir=FILTER_16,
                 address=BME280_I2CADDR,
                 i2c=None):
        osamples = [
            OSAMPLE_0, OSAMPLE_1, OSAMPLE_2, OSAMPLE_4, OSAMPLE_8, OSAMPLE_16]
        msg_error = 'Unexpected {} operating mode value {}.'
        if temperature_mode not in osamples:
            raise ValueError(msg_error.format("temperature", temperature_mode))
        self.temperature_mode = temperature_mode
        if pressure_mode not in osamples:
            raise ValueError(msg_error.format("pressure", pressure_mode))
        self.pressure_mode = pressure_mode
        if humidity_mode not in osamples:
            raise ValueError(msg_error.format("humidity", humidity_mode))
        self.humidity_mode = humidity_mode

        msg_error = 'Unexpected low pass IIR filter setting value {0}.'
        if iir not in [FILTER_OFF, FILTER_2, FILTER_4, FILTER_8, FILTER_16]:
            raise ValueError(msg_error.format(iir))
        self.iir = iir

        msg_error = 'Unexpected temperature scale value {0}.'
        if temperature_scale not in [CELSIUS, FAHRENHEIT, KELVIN]:
            raise ValueError(msg_error.format(temperature_scale))
        self.temperature_scale = temperature_scale
        del msg_error

        self.address = address
        if i2c is None:
            raise ValueError('An I2C object is required.')
        self.i2c = i2c

        # Read the factory calibration coefficients from the sensor.
        dig_88_a1 = self.i2c.readfrom_mem(self.address, 0x88, 26)
        dig_e1_e7 = self.i2c.readfrom_mem(self.address, 0xE1, 7)
        self.dig_T1, self.dig_T2, self.dig_T3, self.dig_P1, \
            self.dig_P2, self.dig_P3, self.dig_P4, self.dig_P5, \
            self.dig_P6, self.dig_P7, self.dig_P8, self.dig_P9, \
            _, self.dig_H1 = unpack("<HhhHhhhhhhhhBB", dig_88_a1)
        self.dig_H2, self.dig_H3 = unpack("<hB", dig_e1_e7)
        e4_sign = unpack_from("<b", dig_e1_e7, 3)[0]
        self.dig_H4 = (e4_sign << 4) | (dig_e1_e7[4] & 0xF)
        e6_sign = unpack_from("<b", dig_e1_e7, 5)[0]
        self.dig_H5 = (e6_sign << 4) | (dig_e1_e7[4] >> 4)
        self.dig_H6 = unpack_from("<b", dig_e1_e7, 6)[0]

        self.i2c.writeto_mem(
            self.address, BME280_REGISTER_CONTROL, bytearray([0x24]))
        time.sleep(0.002)

        self.t_fine = 0
        self._l1_barray = bytearray(1)
        self._l8_barray = bytearray(8)
        self._l3_resultarray = array("i", [0, 0, 0])

        self._l1_barray[0] = self.iir << 2
        self.i2c.writeto_mem(
            self.address, BME280_REGISTER_CONTROL_IIR, self._l1_barray)
        time.sleep(0.002)
        self._l1_barray[0] = self.humidity_mode
        self.i2c.writeto_mem(
            self.address, BME280_REGISTER_CONTROL_HUM, self._l1_barray)

    def read_raw_data(self, result):
        self._l1_barray[0] = (
            self.pressure_mode << 5 | self.temperature_mode << 2 | 1)
        self.i2c.writeto_mem(
            self.address, BME280_REGISTER_CONTROL, self._l1_barray)

        osamples_1_16 = [
            OSAMPLE_1, OSAMPLE_2, OSAMPLE_4, OSAMPLE_8, OSAMPLE_16]
        sleep_time = 1250
        if self.temperature_mode in osamples_1_16:
            sleep_time += 2300*(1 << self.temperature_mode)
        if self.pressure_mode in osamples_1_16:
            sleep_time += 575 + (2300*(1 << self.pressure_mode))
        if self.humidity_mode in osamples_1_16:
            sleep_time += 575 + (2300*(1 << self.humidity_mode))
        time.sleep_us(sleep_time)

        # Wait while the sensor still reports a measurement in progress.
        while (unpack('<H', self.i2c.readfrom_mem(
                self.address, BME280_REGISTER_STATUS, 2))[0] & 0x08):
            time.sleep(0.001)

        self.i2c.readfrom_mem_into(self.address, 0xF7, self._l8_barray)
        readout = self._l8_barray
        raw_press = ((readout[0] << 16) | (readout[1] << 8) | readout[2]) >> 4
        raw_temp = ((readout[3] << 16) | (readout[4] << 8) | readout[5]) >> 4
        raw_hum = (readout[6] << 8) | readout[7]
        result[0] = raw_temp
        result[1] = raw_press
        result[2] = raw_hum

    def read_compensated_data(self, result=None):
        """ Get raw data and compensate the same """
        self.read_raw_data(self._l3_resultarray)
        raw_temp, raw_press, raw_hum = self._l3_resultarray

        # Temperature compensation
        var1 = ((raw_temp >> 3) - (self.dig_T1 << 1)) * (self.dig_T2 >> 11)
        var2 = (raw_temp >> 4) - self.dig_T1
        var2 = var2 * ((raw_temp >> 4) - self.dig_T1)
        var2 = ((var2 >> 12) * self.dig_T3) >> 14
        self.t_fine = var1 + var2
        temp = (self.t_fine * 5 + 128) >> 8

        # Pressure compensation
        var1 = self.t_fine - 128000
        var2 = var1 * var1 * self.dig_P6
        var2 = var2 + ((var1 * self.dig_P5) << 17)
        var2 = var2 + (self.dig_P4 << 35)
        var1 = (((var1 * var1 * self.dig_P3) >> 8) +
                ((var1 * self.dig_P2) << 12))
        var1 = (((1 << 47) + var1) * self.dig_P1) >> 33
        if var1 == 0:
            pressure = 0
        else:
            p = 1048576 - raw_press
            p = (((p << 31) - var2) * 3125) // var1
            var1 = (self.dig_P9 * (p >> 13) * (p >> 13)) >> 25
            var2 = (self.dig_P8 * p) >> 19
            pressure = ((p + var1 + var2) >> 8) + (self.dig_P7 << 4)

        # Humidity compensation
        h = self.t_fine - 76800
        h = (((((raw_hum << 14) - (self.dig_H4 << 20) -
                (self.dig_H5 * h)) + 16384) >> 15) *
             (((((((h * self.dig_H6) >> 10) *
                  (((h * self.dig_H3) >> 11) + 32768)) >> 10) + 2097152) *
               self.dig_H2 + 8192) >> 14))
        h = h - (((((h >> 15) * (h >> 15)) >> 7) * self.dig_H1) >> 4)
        h = 0 if h < 0 else h
        h = 419430400 if h > 419430400 else h
        humidity = h >> 12

        if result:
            result[0] = temp
            result[1] = pressure
            result[2] = humidity
            return result
        return array("i", (temp, pressure, humidity))

    @property
    def values(self):
        temp, pres, humi = self.read_compensated_data()
        temp = temp/100
        if self.temperature_scale == 'F':
            temp = 32 + (temp*1.8)
        elif self.temperature_scale == 'K':
            temp = temp + 273.15
        pres = pres/256
        humi = humi/1024
        return (temp, pres, humi)

    @property
    def formated_values(self):
        t, p, h = self.values
        temp = "{} " + self.temperature_scale
        return (temp.format(t), "{} Pa".format(p), "{} %".format(h))

    @property
    def temperature(self):
        t, _, _ = self.values
        return t

    @property
    def pressure(self):
        _, p, _ = self.values
        return p

    @property
    def pressure_precision(self):
        _, p, _ = self.read_compensated_data()
        pi = float(p // 256)
        pd = (p % 256)/256
        return (pi, pd)

    @property
    def humidity(self):
        _, _, h = self.values
        return h

    def altitude(self, pressure_sea_level=1013.25):
        pi, pd = self.pressure_precision
        # Standard international barometric formula; the sensor reports
        # pressure in Pa while pressure_sea_level is given in hPa.
        return 44330*(1 - (((pi + pd)/100)/pressure_sea_level)**(1/5.255))
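For context, a minimal usage sketch of the driver above. It assumes a MicroPython board with the sensor wired to an I2C bus; the pin numbers and the exact I2C constructor arguments are placeholders and vary by port, and the module is assumed to be saved as bme280.py.

# Minimal usage sketch (assumed wiring: SCL on pin 22, SDA on pin 21; newer
# MicroPython ports may require I2C(0, ...) or SoftI2C instead).
from machine import I2C, Pin
from bme280 import BME280

i2c = I2C(scl=Pin(22), sda=Pin(21))
sensor = BME280(i2c=i2c)          # defaults: address 0x76, Celsius, IIR filter x16

print(sensor.values)              # (temperature, pressure in Pa, relative humidity in %)
print(sensor.formated_values)     # the same readings as formatted strings
print(sensor.altitude())          # altitude in metres, assuming 1013.25 hPa at sea level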
[ "super(VaultBackend, self).__init__() self.connections_path = connections_path.rstrip('/') if variables_path != None: self.variables_path", "auth_type self.kwargs = kwargs self.token = token self.username = username", "username=self.username, password=self.password) elif self.auth_type == \"userpass\": _client.auth_userpass(username=self.username, password=self.password) elif self.auth_type", ":type username: str :param password: Password for Authentication (for ``ldap``", "``{\"connections_path\": \"connections\"}`` and request conn_id ``smtp_default``. :param connections_path: Specifies the", "OF ANY # KIND, either express or implied. See the", "class VaultBackend(BaseSecretsBackend, LoggingMixin): \"\"\" Retrieves Connections and Variables from Hashicorp", "Vault. :param path_prefix: Prefix for the Path to get Secret", "self.kwargs = kwargs self.token = token self.username = username self.password", "get_conn_uri(self, conn_id): # type: (str) -> Optional[str] \"\"\" Get secret", "more contributor license agreements. See the NOTICE file # distributed", ":type connections_path: str :param variables_path: Specifies the path of the", ":param conn_id: connection id :type conn_id: str \"\"\" response =", "path of the secret to read Airflow Configurations (default: 'configs').", "== 1 else response[\"data\"][\"data\"] return return_data def get_config(self, key): #", ":type url: str :param auth_type: Authentication Type for Vault (one", "Apache Software Foundation (ASF) under one # or more contributor", "WARRANTIES OR CONDITIONS OF ANY # KIND, either express or", "role_id: str :param kubernetes_role: Role for Authentication (for ``kubernetes`` auth_type)", ":type config_path: str :param url: Base URL for the Vault", "username self.password = password self.role_id = role_id self.kubernetes_role = kubernetes_role", "2.0 (the # \"License\"); you may not use this file", "Optional[str] username=None, # type: Optional[str] password=<PASSWORD>, # type: Optional[str] role_id=None,", "= kubernetes_jwt_path self.secret_id = secret_id self.mount_point = mount_point self.kv_engine_version =", "type: (str) -> Optional[str] \"\"\" Get Airflow Variable :param key:", "str) -> Optional[dict] \"\"\" Get secret value from Vault. 
:param", "\"\"\" from typing import Optional import hvac from cached_property import", "token self.username = username self.password = password self.role_id = role_id", "%s not found in Path: %s\", secret_id, secret_path) return None", "(default: 'variables') :type variables_path: str :param config_path: Specifies the path", "url: str :param auth_type: Authentication Type for Vault (one of", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "mount_point='secret', # type: str kv_engine_version=2, # type: int token=None, #", "elif self.auth_type == \"github\": _client.auth.github.login(token=self.token) elif self.auth_type == \"gcp\": from", "specific language governing permissions and limitations # under the License.", "import ( get_credentials_and_project_id, _get_scopes ) scopes = _get_scopes(self.gcp_scopes) credentials, _", "under the License is distributed on an # \"AS IS\"", "variables_path: str :param config_path: Specifies the path of the secret", "``connections`` path in ``airflow`` mount_point, this would be accessible if", "BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either", "from hvac.exceptions import InvalidPath, VaultError from airflow.exceptions import AirflowException from", "role_id self.kubernetes_role = kubernetes_role self.kubernetes_jwt_path = kubernetes_jwt_path self.secret_id = secret_id", "type: (str, str) -> Optional[dict] \"\"\" Get secret value from", "the Vault instance being addressed. :type url: str :param auth_type:", "VaultError from airflow.exceptions import AirflowException from airflow.secrets import BaseSecretsBackend from", "= self.token elif self.auth_type == \"ldap\": _client.auth.ldap.login( username=self.username, password=self.password) elif", "\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY #", "None def _get_secret(self, path_prefix, secret_id): # type: (str, str) ->", "type: (str) -> Optional[str] \"\"\" Get Airflow Configuration :param key:", "Authentication (for ``ldap`` and ``userpass`` auth_type) :type password: str :param", "secret_id): # type: (str, str) -> Optional[dict] \"\"\" Get secret", "not self.token: raise VaultError(\"token cannot be None for auth_type='token'\") _client.token", "``userpass`` auth_type) :type username: str :param password: Password for Authentication", "if not self.kubernetes_role: raise VaultError(\"kubernetes_role cannot be None for auth_type='kubernetes'\")", "distributed with this work for additional information # regarding copyright", "conn_id: connection id :type conn_id: str \"\"\" response = self._get_secret(self.connections_path,", "authenticated Hashicorp Vault client \"\"\" _client = hvac.Client(url=self.url, **self.kwargs) if", "for the # specific language governing permissions and limitations #", "and ``userpass`` auth_type) :type username: str :param password: Password for", "type: Optional[str] kubernetes_role=None, # type: Optional[str] kubernetes_jwt_path='/var/run/secrets/kubernetes.io/serviceaccount/token', # type: str", "self.auth_type == \"gcp\": from airflow.contrib.utils.gcp_credentials_provider import ( get_credentials_and_project_id, _get_scopes )", "response.get(\"conn_uri\") if response else None def get_variable(self, key): # type:", "raise VaultError(\"kubernetes_role cannot be None for auth_type='kubernetes'\") with open(self.kubernetes_jwt_path) as", "str :param secret_id: Secret Key :type secret_id: str \"\"\" secret_path", "Authentication Type for Vault (one of 'token', 'ldap', 'userpass', 'approle',", "See the License for the # 
specific language governing permissions", "to in writing, # software distributed under the License is", "self.username = username self.password = password self.role_id = role_id self.kubernetes_role", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "_client.auth.ldap.login( username=self.username, password=self.password) elif self.auth_type == \"userpass\": _client.auth_userpass(username=self.username, password=self.password) elif", "read to get Connections. (default: 'connections') :type connections_path: str :param", "client(self): # type: () -> hvac.Client \"\"\" Return an authenticated", "Optional import hvac from cached_property import cached_property from hvac.exceptions import", "if _client.is_authenticated(): return _client else: raise VaultError(\"Vault Authentication Error!\") def", "secret value from Vault. :param path_prefix: Prefix for the Path", "variables_path='variables', # type: str config_path='config', # type: str url=None, #", "secret_id=None, # type: Optional[str] gcp_key_path=None, # type: Optional[str] gcp_scopes=None, #", "type: Optional[str] gcp_scopes=None, # type: Optional[str] **kwargs ): super(VaultBackend, self).__init__()", "the secret to read to get Variables. (default: 'variables') :type", "form of URI :param conn_id: connection id :type conn_id: str", "elif self.auth_type == \"gcp\": from airflow.contrib.utils.gcp_credentials_provider import ( get_credentials_and_project_id, _get_scopes", "type: str url=None, # type: Optional[str] auth_type='token', # type: str", "Secret :type path_prefix: str :param secret_id: Secret Key :type secret_id:", "file # distributed with this work for additional information #", "self.gcp_key_path = gcp_key_path self.gcp_scopes = gcp_scopes @cached_property def client(self): #", "for Vault (one of 'token', 'ldap', 'userpass', 'approle', 'github', 'gcp',", "= kwargs self.token = token self.username = username self.password =", "``userpass`` auth_type) :type password: str :param role_id: Role ID for", "InvalidPath, VaultError from airflow.exceptions import AirflowException from airflow.secrets import BaseSecretsBackend", "for Authentication (for ``kubernetes`` auth_type) :type kubernetes_role: str :param kubernetes_jwt_path:", "_get_scopes(self.gcp_scopes) credentials, _ = get_credentials_and_project_id(key_path=self.gcp_key_path, scopes=scopes) _client.auth.gcp.configure(credentials=credentials) else: raise AirflowException(\"Authentication", "== \"userpass\": _client.auth_userpass(username=self.username, password=self.password) elif self.auth_type == \"approle\": _client.auth_approle(role_id=self.role_id, secret_id=self.secret_id)", "and Variables from Hashicorp Vault Configurable via ``airflow.cfg`` as follows:", "keys are under ``connections`` path in ``airflow`` mount_point, this would", "_client.is_authenticated(): return _client else: raise VaultError(\"Vault Authentication Error!\") def get_conn_uri(self,", "path_prefix, secret_id): # type: (str, str) -> Optional[dict] \"\"\" Get", "for Authentication (for ``ldap`` and ``userpass`` auth_type) :type password: str", "= password self.role_id = role_id self.kubernetes_role = kubernetes_role self.kubernetes_jwt_path =", "type: str kv_engine_version=2, # type: int token=None, # type: Optional[str]", "request conn_id ``smtp_default``. :param connections_path: Specifies the path of the", "``github`` auth_type) :type token: str :param kv_engine_version: Select the version", "Get secret value from Vault. 
:param path_prefix: Prefix for the", "implied. See the License for the # specific language governing", "path_prefix: str :param secret_id: Secret Key :type secret_id: str \"\"\"", "to you under the Apache License, Version 2.0 (the #", "token: Authentication token to include in requests sent to Vault.", "example, if your keys are under ``connections`` path in ``airflow``", ":param password: Password for Authentication (for ``ldap`` and ``userpass`` auth_type)", "type: str secret_id=None, # type: Optional[str] gcp_key_path=None, # type: Optional[str]", ":param mount_point: The \"path\" the secret engine was mounted on.", "for auth_type='token'\") _client.token = self.token elif self.auth_type == \"ldap\": _client.auth.ldap.login(", "(str) -> Optional[str] \"\"\" Get Airflow Configuration :param key: Configuration", "self.kubernetes_role = kubernetes_role self.kubernetes_jwt_path = kubernetes_jwt_path self.secret_id = secret_id self.mount_point", "\"\"\" Retrieves Connections and Variables from Hashicorp Vault Configurable via", "ini [secrets] backend = airflow.contrib.secrets.hashicorp_vault.VaultBackend backend_kwargs = { \"connections_path\": \"connections\",", "may not use this file except in compliance # with", "the path of the secret to read to get Variables.", "str secret_id=None, # type: Optional[str] gcp_key_path=None, # type: Optional[str] gcp_scopes=None,", "VaultError(\"token cannot be None for auth_type='token'\") _client.token = self.token elif", "Path to get Secret :type path_prefix: str :param secret_id: Secret", "jwt = f.read() _client.auth_kubernetes(role=self.kubernetes_role, jwt=jwt) elif self.auth_type == \"github\": _client.auth.github.login(token=self.token)", "\"\"\" Get Airflow Configuration :param key: Configuration Option Key :type", ":param key: Variable Key :return: Variable Value \"\"\" if self.variables_path", "return response.get(\"conn_uri\") if response else None def get_variable(self, key): #", "kubernetes_role=None, # type: Optional[str] kubernetes_jwt_path='/var/run/secrets/kubernetes.io/serviceaccount/token', # type: str secret_id=None, #", "VaultError(\"kubernetes_role cannot be None for auth_type='kubernetes'\") with open(self.kubernetes_jwt_path) as f:", "conn_id): # type: (str) -> Optional[str] \"\"\" Get secret value", "str :param role_id: Role ID for Authentication (for ``approle`` auth_type)", "for Authentication (for ``approle`` auth_type) :type secret_id: str :param gcp_key_path:", "``approle`` auth_type) :type secret_id: str :param gcp_key_path: Path to GCP", "# type: Optional[str] password=<PASSWORD>, # type: Optional[str] role_id=None, # type:", "in Path: %s\", secret_id, secret_path) return None return_data = response[\"data\"]", "License, Version 2.0 (the # \"License\"); you may not use", "\"\"\" Get Airflow Variable :param key: Variable Key :return: Variable", "Vault (one of 'token', 'ldap', 'userpass', 'approle', 'github', 'gcp', 'kubernetes').", "either express or implied. 
See the License for the #", "= role_id self.kubernetes_role = kubernetes_role self.kubernetes_jwt_path = kubernetes_jwt_path self.secret_id =", "raise AirflowException(\"Authentication type '{}' not supported\".format(self.auth_type)) if _client.is_authenticated(): return _client", "for the Path to get Secret :type path_prefix: str :param", "# type: (str) -> Optional[str] \"\"\" Get Airflow Configuration :param", "# type: Optional[str] auth_type='token', # type: str mount_point='secret', # type:", "!= None: self.variables_path = variables_path.rstrip('/') else: self.variables_path = variables_path self.config_path", "``airflow`` mount_point, this would be accessible if you provide ``{\"connections_path\":", "Authentication (for ``ldap`` and ``userpass`` auth_type) :type username: str :param", ":type gcp_key_path: str :param gcp_scopes: Comma-separated string containing GCP scopes", "if response else None def get_variable(self, key): # type: (str)", "== \"github\": _client.auth.github.login(token=self.token) elif self.auth_type == \"gcp\": from airflow.contrib.utils.gcp_credentials_provider import", "config_path: Specifies the path of the secret to read Airflow", "not self.kubernetes_role: raise VaultError(\"kubernetes_role cannot be None for auth_type='kubernetes'\") with", "additional information # regarding copyright ownership. The ASF licenses this", "self, connections_path='connections', # type: str variables_path='variables', # type: str config_path='config',", "Optional[str] kubernetes_jwt_path='/var/run/secrets/kubernetes.io/serviceaccount/token', # type: str secret_id=None, # type: Optional[str] gcp_key_path=None,", "gcp_scopes: Comma-separated string containing GCP scopes (for ``gcp`` auth_type) :type", ":param secret_id: Secret ID for Authentication (for ``approle`` auth_type) :type", "See the NOTICE file # distributed with this work for", "'connections') :type connections_path: str :param variables_path: Specifies the path of", "# type: int token=None, # type: Optional[str] username=None, # type:", "``airflow.cfg`` as follows: .. code-block:: ini [secrets] backend = airflow.contrib.secrets.hashicorp_vault.VaultBackend", "an authenticated Hashicorp Vault client \"\"\" _client = hvac.Client(url=self.url, **self.kwargs)", "str :param gcp_key_path: Path to GCP Credential JSON file (for", "connections_path='connections', # type: str variables_path='variables', # type: str config_path='config', #", "Path to GCP Credential JSON file (for ``gcp`` auth_type) :type", "Apache License, Version 2.0 (the # \"License\"); you may not", "sent to Vault. (for ``token`` and ``github`` auth_type) :type token:", "str :rtype: str :return: Configuration Option Value retrieved from the", "Password for Authentication (for ``ldap`` and ``userpass`` auth_type) :type password:", "= connections_path.rstrip('/') if variables_path != None: self.variables_path = variables_path.rstrip('/') else:", "cached_property import cached_property from hvac.exceptions import InvalidPath, VaultError from airflow.exceptions", "'configs'). 
:type config_path: str :param url: Base URL for the", "= airflow.contrib.secrets.hashicorp_vault.VaultBackend backend_kwargs = { \"connections_path\": \"connections\", \"url\": \"http://127.0.0.1:8200\", \"mount_point\":", "jwt=jwt) elif self.auth_type == \"github\": _client.auth.github.login(token=self.token) elif self.auth_type == \"gcp\":", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "token=None, # type: Optional[str] username=None, # type: Optional[str] password=<PASSWORD>, #", "} For example, if your keys are under ``connections`` path", "disable=too-many-arguments self, connections_path='connections', # type: str variables_path='variables', # type: str", "kubernetes_role: str :param kubernetes_jwt_path: Path for kubernetes jwt token (for", "file except in compliance # with the License. You may", "``smtp_default``. :param connections_path: Specifies the path of the secret to", "Specifies the path of the secret to read Airflow Configurations", "if self.auth_type == \"token\": if not self.token: raise VaultError(\"token cannot", "except InvalidPath: self.log.info(\"Secret %s not found in Path: %s\", secret_id,", "# specific language governing permissions and limitations # under the", "import hvac from cached_property import cached_property from hvac.exceptions import InvalidPath,", "variables_path.rstrip('/') else: self.variables_path = variables_path self.config_path = config_path.rstrip('/') self.url =", "limitations # under the License. \"\"\" Objects relating to sourcing", "response else None def _get_secret(self, path_prefix, secret_id): # type: (str,", "type: int token=None, # type: Optional[str] username=None, # type: Optional[str]", "you may not use this file except in compliance #", "hvac.Client \"\"\" Return an authenticated Hashicorp Vault client \"\"\" _client", "password=self.password) elif self.auth_type == \"userpass\": _client.auth_userpass(username=self.username, password=self.password) elif self.auth_type ==", "\"\"\" def __init__( # pylint: disable=too-many-arguments self, connections_path='connections', # type:", "def __init__( # pylint: disable=too-many-arguments self, connections_path='connections', # type: str", "to run (``1`` or ``2``, default: ``2``) :type kv_engine_version: int", "kubernetes_role self.kubernetes_jwt_path = kubernetes_jwt_path self.secret_id = secret_id self.mount_point = mount_point", "Role for Authentication (for ``kubernetes`` auth_type) :type kubernetes_role: str :param", "Authentication (for ``approle`` auth_type) :type role_id: str :param kubernetes_role: Role", "mount_point=self.mount_point) except InvalidPath: self.log.info(\"Secret %s not found in Path: %s\",", "and request conn_id ``smtp_default``. 
:param connections_path: Specifies the path of", "use this file except in compliance # with the License.", "cached_property from hvac.exceptions import InvalidPath, VaultError from airflow.exceptions import AirflowException", "your keys are under ``connections`` path in ``airflow`` mount_point, this", "password: Password for Authentication (for ``ldap`` and ``userpass`` auth_type) :type", ":return: Configuration Option Value retrieved from the vault \"\"\" response", "# type: Optional[str] gcp_scopes=None, # type: Optional[str] **kwargs ): super(VaultBackend,", "secret_id: Secret ID for Authentication (for ``approle`` auth_type) :type secret_id:", "\"http://127.0.0.1:8200\", \"mount_point\": \"airflow\" } For example, if your keys are", "str \"\"\" def __init__( # pylint: disable=too-many-arguments self, connections_path='connections', #", "Secret Key :type secret_id: str \"\"\" secret_path = self.build_path(path_prefix, secret_id)", "contributor license agreements. See the NOTICE file # distributed with", "self.kv_engine_version == 1 else response[\"data\"][\"data\"] return return_data def get_config(self, key):", "path in ``airflow`` mount_point, this would be accessible if you", "\"\"\" Return an authenticated Hashicorp Vault client \"\"\" _client =", "f.read() _client.auth_kubernetes(role=self.kubernetes_role, jwt=jwt) elif self.auth_type == \"github\": _client.auth.github.login(token=self.token) elif self.auth_type", "value from Vault. Store the secret in the form of", "(str) -> Optional[str] \"\"\" Get secret value from Vault. Store", "import InvalidPath, VaultError from airflow.exceptions import AirflowException from airflow.secrets import", "typing import Optional import hvac from cached_property import cached_property from", "auth_type) :type secret_id: str :param gcp_key_path: Path to GCP Credential", "an # \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF", "str :param kubernetes_jwt_path: Path for kubernetes jwt token (for ``kubernetes``", "hvac from cached_property import cached_property from hvac.exceptions import InvalidPath, VaultError", "kv_engine_version: Select the version of the engine to run (``1``", "to GCP Credential JSON file (for ``gcp`` auth_type) :type gcp_key_path:", "_client = hvac.Client(url=self.url, **self.kwargs) if self.auth_type == \"token\": if not", "int token=None, # type: Optional[str] username=None, # type: Optional[str] password=<PASSWORD>,", "WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express", "with this work for additional information # regarding copyright ownership.", "variables_path self.config_path = config_path.rstrip('/') self.url = url self.auth_type = auth_type", "from cached_property import cached_property from hvac.exceptions import InvalidPath, VaultError from", "conn_id ``smtp_default``. 
:param connections_path: Specifies the path of the secret", "_client else: raise VaultError(\"Vault Authentication Error!\") def get_conn_uri(self, conn_id): #", "in ``airflow`` mount_point, this would be accessible if you provide", "connections & variables from Hashicorp Vault \"\"\" from typing import", ":param config_path: Specifies the path of the secret to read", "scopes = _get_scopes(self.gcp_scopes) credentials, _ = get_credentials_and_project_id(key_path=self.gcp_key_path, scopes=scopes) _client.auth.gcp.configure(credentials=credentials) else:", "raise VaultError(\"Vault Authentication Error!\") def get_conn_uri(self, conn_id): # type: (str)", "def client(self): # type: () -> hvac.Client \"\"\" Return an", "Airflow Configuration :param key: Configuration Option Key :type key: str", "self.kv_engine_version = kv_engine_version self.gcp_key_path = gcp_key_path self.gcp_scopes = gcp_scopes @cached_property", "mount_point: str :param token: Authentication token to include in requests", "``kubernetes`` auth_type, deafult: ``/var/run/secrets/kubernetes.io/serviceaccount/token``) :type kubernetes_jwt_path: str :param secret_id: Secret", "secret_id: str :param gcp_key_path: Path to GCP Credential JSON file", "work for additional information # regarding copyright ownership. The ASF", "# type: (str) -> Optional[str] \"\"\" Get secret value from", "For example, if your keys are under ``connections`` path in", "self.token = token self.username = username self.password = password self.role_id", "AirflowException from airflow.secrets import BaseSecretsBackend from airflow.utils.log.logging_mixin import LoggingMixin class", "response = self.client.secrets.kv.v2.read_secret_version( path=secret_path, mount_point=self.mount_point) except InvalidPath: self.log.info(\"Secret %s not", "Optional[str] gcp_key_path=None, # type: Optional[str] gcp_scopes=None, # type: Optional[str] **kwargs", "= self.client.secrets.kv.v2.read_secret_version( path=secret_path, mount_point=self.mount_point) except InvalidPath: self.log.info(\"Secret %s not found", "self.client.secrets.kv.v2.read_secret_version( path=secret_path, mount_point=self.mount_point) except InvalidPath: self.log.info(\"Secret %s not found in", "distributed under the License is distributed on an # \"AS", "self.secret_id = secret_id self.mount_point = mount_point self.kv_engine_version = kv_engine_version self.gcp_key_path", "Path: %s\", secret_id, secret_path) return None return_data = response[\"data\"] if", "follows: .. code-block:: ini [secrets] backend = airflow.contrib.secrets.hashicorp_vault.VaultBackend backend_kwargs =", "str \"\"\" response = self._get_secret(self.connections_path, conn_id) return response.get(\"conn_uri\") if response", "# software distributed under the License is distributed on an", "_ = get_credentials_and_project_id(key_path=self.gcp_key_path, scopes=scopes) _client.auth.gcp.configure(credentials=credentials) else: raise AirflowException(\"Authentication type '{}'", "response = self._get_secret(self.variables_path, key) return response.get(\"value\") if response else None", "Key :type key: str :rtype: str :return: Configuration Option Value", "key) return response.get(\"value\") if response else None def _get_secret(self, path_prefix,", "'approle', 'github', 'gcp', 'kubernetes'). Default is ``token``. :type auth_type: str", "the License. 
You may obtain a copy of the License", "from airflow.utils.log.logging_mixin import LoggingMixin class VaultBackend(BaseSecretsBackend, LoggingMixin): \"\"\" Retrieves Connections", "requests sent to Vault. (for ``token`` and ``github`` auth_type) :type", "code-block:: ini [secrets] backend = airflow.contrib.secrets.hashicorp_vault.VaultBackend backend_kwargs = { \"connections_path\":", "auth_type: str :param mount_point: The \"path\" the secret engine was", "import cached_property from hvac.exceptions import InvalidPath, VaultError from airflow.exceptions import", "self.url = url self.auth_type = auth_type self.kwargs = kwargs self.token", "# type: str kv_engine_version=2, # type: int token=None, # type:", "under the Apache License, Version 2.0 (the # \"License\"); you", "Role ID for Authentication (for ``approle`` auth_type) :type role_id: str", "raise VaultError(\"token cannot be None for auth_type='token'\") _client.token = self.token", "'{}' not supported\".format(self.auth_type)) if _client.is_authenticated(): return _client else: raise VaultError(\"Vault", "vault \"\"\" response = self._get_secret(self.config_path, key) return response.get(\"value\") if response", "() -> hvac.Client \"\"\" Return an authenticated Hashicorp Vault client", ".. code-block:: ini [secrets] backend = airflow.contrib.secrets.hashicorp_vault.VaultBackend backend_kwargs = {", "distributed on an # \"AS IS\" BASIS, WITHOUT WARRANTIES OR", "url=None, # type: Optional[str] auth_type='token', # type: str mount_point='secret', #", "regarding copyright ownership. The ASF licenses this file # to", "str :param kv_engine_version: Select the version of the engine to", "or agreed to in writing, # software distributed under the", "# type: str config_path='config', # type: str url=None, # type:", "Select the version of the engine to run (``1`` or", "== None: return None else: response = self._get_secret(self.variables_path, key) return", "# type: Optional[str] **kwargs ): super(VaultBackend, self).__init__() self.connections_path = connections_path.rstrip('/')", "in the form of URI :param conn_id: connection id :type", "-> Optional[str] \"\"\" Get Airflow Configuration :param key: Configuration Option", "self.kubernetes_role: raise VaultError(\"kubernetes_role cannot be None for auth_type='kubernetes'\") with open(self.kubernetes_jwt_path)", "be accessible if you provide ``{\"connections_path\": \"connections\"}`` and request conn_id", "the path of the secret to read Airflow Configurations (default:", "password: str :param role_id: Role ID for Authentication (for ``approle``", "_client.auth_kubernetes(role=self.kubernetes_role, jwt=jwt) elif self.auth_type == \"github\": _client.auth.github.login(token=self.token) elif self.auth_type ==", "key: str :rtype: str :return: Configuration Option Value retrieved from", "LoggingMixin class VaultBackend(BaseSecretsBackend, LoggingMixin): \"\"\" Retrieves Connections and Variables from", "return response.get(\"value\") if response else None def _get_secret(self, path_prefix, secret_id):", "**kwargs ): super(VaultBackend, self).__init__() self.connections_path = connections_path.rstrip('/') if variables_path !=", ":return: Variable Value \"\"\" if self.variables_path == None: return None", "Return an authenticated Hashicorp Vault client \"\"\" _client = hvac.Client(url=self.url,", "Specifies the path of the secret to read to get", "value from Vault. 
:param path_prefix: Prefix for the Path to", "username: Username for Authentication (for ``ldap`` and ``userpass`` auth_type) :type", "return None return_data = response[\"data\"] if self.kv_engine_version == 1 else", "or more contributor license agreements. See the NOTICE file #", "return None else: response = self._get_secret(self.variables_path, key) return response.get(\"value\") if", "engine to run (``1`` or ``2``, default: ``2``) :type kv_engine_version:", "\"\"\" Get secret value from Vault. Store the secret in", "airflow.contrib.secrets.hashicorp_vault.VaultBackend backend_kwargs = { \"connections_path\": \"connections\", \"url\": \"http://127.0.0.1:8200\", \"mount_point\": \"airflow\"", "include in requests sent to Vault. (for ``token`` and ``github``", "kv_engine_version self.gcp_key_path = gcp_key_path self.gcp_scopes = gcp_scopes @cached_property def client(self):", "str :param password: Password for Authentication (for ``ldap`` and ``userpass``", "the secret in the form of URI :param conn_id: connection", "this work for additional information # regarding copyright ownership. The", "from Hashicorp Vault \"\"\" from typing import Optional import hvac", "(Default: ``secret``) :type mount_point: str :param token: Authentication token to", "path_prefix: Prefix for the Path to get Secret :type path_prefix:", "Prefix for the Path to get Secret :type path_prefix: str", "Vault instance being addressed. :type url: str :param auth_type: Authentication", "Get secret value from Vault. Store the secret in the", "read to get Variables. (default: 'variables') :type variables_path: str :param", "the NOTICE file # distributed with this work for additional", "(for ``approle`` auth_type) :type role_id: str :param kubernetes_role: Role for", "connections_path: str :param variables_path: Specifies the path of the secret", "# type: str secret_id=None, # type: Optional[str] gcp_key_path=None, # type:", "Vault client \"\"\" _client = hvac.Client(url=self.url, **self.kwargs) if self.auth_type ==", "with open(self.kubernetes_jwt_path) as f: jwt = f.read() _client.auth_kubernetes(role=self.kubernetes_role, jwt=jwt) elif", "get_credentials_and_project_id, _get_scopes ) scopes = _get_scopes(self.gcp_scopes) credentials, _ = get_credentials_and_project_id(key_path=self.gcp_key_path,", "pylint: disable=too-many-arguments self, connections_path='connections', # type: str variables_path='variables', # type:", "\"\"\" if self.variables_path == None: return None else: response =", "the License. \"\"\" Objects relating to sourcing connections & variables", "mount_point: The \"path\" the secret engine was mounted on. (Default:", "Optional[str] password=<PASSWORD>, # type: Optional[str] role_id=None, # type: Optional[str] kubernetes_role=None,", "gcp_scopes: str \"\"\" def __init__( # pylint: disable=too-many-arguments self, connections_path='connections',", "str kv_engine_version=2, # type: int token=None, # type: Optional[str] username=None,", "mount_point, this would be accessible if you provide ``{\"connections_path\": \"connections\"}``", "type: () -> hvac.Client \"\"\" Return an authenticated Hashicorp Vault", "secret to read to get Variables. 
(default: 'variables') :type variables_path:", "\"kubernetes\": if not self.kubernetes_role: raise VaultError(\"kubernetes_role cannot be None for", "== \"kubernetes\": if not self.kubernetes_role: raise VaultError(\"kubernetes_role cannot be None", "this would be accessible if you provide ``{\"connections_path\": \"connections\"}`` and", "str mount_point='secret', # type: str kv_engine_version=2, # type: int token=None,", "None return_data = response[\"data\"] if self.kv_engine_version == 1 else response[\"data\"][\"data\"]", "self.auth_type == \"github\": _client.auth.github.login(token=self.token) elif self.auth_type == \"gcp\": from airflow.contrib.utils.gcp_credentials_provider", "engine was mounted on. (Default: ``secret``) :type mount_point: str :param", "str :param secret_id: Secret ID for Authentication (for ``approle`` auth_type)", "self.gcp_scopes = gcp_scopes @cached_property def client(self): # type: () ->", "# type: Optional[str] kubernetes_role=None, # type: Optional[str] kubernetes_jwt_path='/var/run/secrets/kubernetes.io/serviceaccount/token', # type:", "the version of the engine to run (``1`` or ``2``,", "auth_type='token', # type: str mount_point='secret', # type: str kv_engine_version=2, #", "from the vault \"\"\" response = self._get_secret(self.config_path, key) return response.get(\"value\")", "id :type conn_id: str \"\"\" response = self._get_secret(self.connections_path, conn_id) return", "mounted on. (Default: ``secret``) :type mount_point: str :param token: Authentication", ":param url: Base URL for the Vault instance being addressed.", "gcp_scopes=None, # type: Optional[str] **kwargs ): super(VaultBackend, self).__init__() self.connections_path =", "None def get_variable(self, key): # type: (str) -> Optional[str] \"\"\"", "backend = airflow.contrib.secrets.hashicorp_vault.VaultBackend backend_kwargs = { \"connections_path\": \"connections\", \"url\": \"http://127.0.0.1:8200\",", "KIND, either express or implied. See the License for the", "Type for Vault (one of 'token', 'ldap', 'userpass', 'approle', 'github',", "and ``userpass`` auth_type) :type password: str :param role_id: Role ID", "Optional[dict] \"\"\" Get secret value from Vault. :param path_prefix: Prefix", "\"\"\" Get secret value from Vault. 
:param path_prefix: Prefix for", "airflow.utils.log.logging_mixin import LoggingMixin class VaultBackend(BaseSecretsBackend, LoggingMixin): \"\"\" Retrieves Connections and", "= _get_scopes(self.gcp_scopes) credentials, _ = get_credentials_and_project_id(key_path=self.gcp_key_path, scopes=scopes) _client.auth.gcp.configure(credentials=credentials) else: raise", "def get_conn_uri(self, conn_id): # type: (str) -> Optional[str] \"\"\" Get", "= url self.auth_type = auth_type self.kwargs = kwargs self.token =", "else None def get_variable(self, key): # type: (str) -> Optional[str]", "Airflow Variable :param key: Variable Key :return: Variable Value \"\"\"", "%s\", secret_id, secret_path) return None return_data = response[\"data\"] if self.kv_engine_version", "Authentication (for ``kubernetes`` auth_type) :type kubernetes_role: str :param kubernetes_jwt_path: Path", "elif self.auth_type == \"kubernetes\": if not self.kubernetes_role: raise VaultError(\"kubernetes_role cannot", "found in Path: %s\", secret_id, secret_path) return None return_data =", ":type password: str :param role_id: Role ID for Authentication (for", "else: self.variables_path = variables_path self.config_path = config_path.rstrip('/') self.url = url", ":rtype: str :return: Configuration Option Value retrieved from the vault", "of the secret to read to get Variables. (default: 'variables')", "or implied. See the License for the # specific language", "instance being addressed. :type url: str :param auth_type: Authentication Type", "express or implied. See the License for the # specific", "kubernetes_jwt_path: str :param secret_id: Secret ID for Authentication (for ``approle``", "of 'token', 'ldap', 'userpass', 'approle', 'github', 'gcp', 'kubernetes'). Default is", "= gcp_key_path self.gcp_scopes = gcp_scopes @cached_property def client(self): # type:", "or ``2``, default: ``2``) :type kv_engine_version: int :param username: Username", "elif self.auth_type == \"userpass\": _client.auth_userpass(username=self.username, password=self.password) elif self.auth_type == \"approle\":", "auth_type) :type token: str :param kv_engine_version: Select the version of", "type: str config_path='config', # type: str url=None, # type: Optional[str]", "password=self.password) elif self.auth_type == \"approle\": _client.auth_approle(role_id=self.role_id, secret_id=self.secret_id) elif self.auth_type ==", "(default: 'configs'). :type config_path: str :param url: Base URL for", "token to include in requests sent to Vault. (for ``token``", "``/var/run/secrets/kubernetes.io/serviceaccount/token``) :type kubernetes_jwt_path: str :param secret_id: Secret ID for Authentication", "(for ``approle`` auth_type) :type secret_id: str :param gcp_key_path: Path to", "accessible if you provide ``{\"connections_path\": \"connections\"}`` and request conn_id ``smtp_default``.", "the vault \"\"\" response = self._get_secret(self.config_path, key) return response.get(\"value\") if", "Configurable via ``airflow.cfg`` as follows: .. 
code-block:: ini [secrets] backend", "the # specific language governing permissions and limitations # under", "'variables') :type variables_path: str :param config_path: Specifies the path of", ":param secret_id: Secret Key :type secret_id: str \"\"\" secret_path =", "type: str mount_point='secret', # type: str kv_engine_version=2, # type: int", "secret_id self.mount_point = mount_point self.kv_engine_version = kv_engine_version self.gcp_key_path = gcp_key_path", "= gcp_scopes @cached_property def client(self): # type: () -> hvac.Client", "Secret ID for Authentication (for ``approle`` auth_type) :type secret_id: str", "str :param url: Base URL for the Vault instance being", "may obtain a copy of the License at # #", "secret in the form of URI :param conn_id: connection id", "Optional[str] \"\"\" Get Airflow Variable :param key: Variable Key :return:", "type: Optional[str] kubernetes_jwt_path='/var/run/secrets/kubernetes.io/serviceaccount/token', # type: str secret_id=None, # type: Optional[str]", "connection id :type conn_id: str \"\"\" response = self._get_secret(self.connections_path, conn_id)", "self.config_path = config_path.rstrip('/') self.url = url self.auth_type = auth_type self.kwargs", "The ASF licenses this file # to you under the", "LoggingMixin): \"\"\" Retrieves Connections and Variables from Hashicorp Vault Configurable", ":param auth_type: Authentication Type for Vault (one of 'token', 'ldap',", "type '{}' not supported\".format(self.auth_type)) if _client.is_authenticated(): return _client else: raise", "= token self.username = username self.password = password self.role_id =", "provide ``{\"connections_path\": \"connections\"}`` and request conn_id ``smtp_default``. :param connections_path: Specifies", "if you provide ``{\"connections_path\": \"connections\"}`` and request conn_id ``smtp_default``. :param", "client \"\"\" _client = hvac.Client(url=self.url, **self.kwargs) if self.auth_type == \"token\":", "elif self.auth_type == \"approle\": _client.auth_approle(role_id=self.role_id, secret_id=self.secret_id) elif self.auth_type == \"kubernetes\":", "__init__( # pylint: disable=too-many-arguments self, connections_path='connections', # type: str variables_path='variables',", "# Licensed to the Apache Software Foundation (ASF) under one", "AirflowException(\"Authentication type '{}' not supported\".format(self.auth_type)) if _client.is_authenticated(): return _client else:", "from airflow.contrib.utils.gcp_credentials_provider import ( get_credentials_and_project_id, _get_scopes ) scopes = _get_scopes(self.gcp_scopes)", "language governing permissions and limitations # under the License. 
\"\"\"", "return return_data def get_config(self, key): # type: (str) -> Optional[str]", "key): # type: (str) -> Optional[str] \"\"\" Get Airflow Variable", "= mount_point self.kv_engine_version = kv_engine_version self.gcp_key_path = gcp_key_path self.gcp_scopes =", "are under ``connections`` path in ``airflow`` mount_point, this would be", "# type: Optional[str] role_id=None, # type: Optional[str] kubernetes_role=None, # type:", "1 else response[\"data\"][\"data\"] return return_data def get_config(self, key): # type:", "law or agreed to in writing, # software distributed under", "get_credentials_and_project_id(key_path=self.gcp_key_path, scopes=scopes) _client.auth.gcp.configure(credentials=credentials) else: raise AirflowException(\"Authentication type '{}' not supported\".format(self.auth_type))", "_get_scopes ) scopes = _get_scopes(self.gcp_scopes) credentials, _ = get_credentials_and_project_id(key_path=self.gcp_key_path, scopes=scopes)", "type: (str) -> Optional[str] \"\"\" Get secret value from Vault.", "Foundation (ASF) under one # or more contributor license agreements.", "Get Airflow Variable :param key: Variable Key :return: Variable Value", ") scopes = _get_scopes(self.gcp_scopes) credentials, _ = get_credentials_and_project_id(key_path=self.gcp_key_path, scopes=scopes) _client.auth.gcp.configure(credentials=credentials)", "if self.kv_engine_version == 1: response = self.client.secrets.kv.v1.read_secret( path=secret_path, mount_point=self.mount_point )", "kubernetes_role: Role for Authentication (for ``kubernetes`` auth_type) :type kubernetes_role: str", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "else: raise VaultError(\"Vault Authentication Error!\") def get_conn_uri(self, conn_id): # type:", "auth_type) :type role_id: str :param kubernetes_role: Role for Authentication (for", "str config_path='config', # type: str url=None, # type: Optional[str] auth_type='token',", "Software Foundation (ASF) under one # or more contributor license", "auth_type) :type password: str :param role_id: Role ID for Authentication", "= self.build_path(path_prefix, secret_id) try: if self.kv_engine_version == 1: response =", "-> hvac.Client \"\"\" Return an authenticated Hashicorp Vault client \"\"\"", "path of the secret to read to get Connections. (default:", "for Authentication (for ``ldap`` and ``userpass`` auth_type) :type username: str", "# regarding copyright ownership. The ASF licenses this file #", "Hashicorp Vault \"\"\" from typing import Optional import hvac from", "f: jwt = f.read() _client.auth_kubernetes(role=self.kubernetes_role, jwt=jwt) elif self.auth_type == \"github\":", "in compliance # with the License. You may obtain a", "# to you under the Apache License, Version 2.0 (the", "License for the # specific language governing permissions and limitations", "response else None def get_variable(self, key): # type: (str) ->", "OR CONDITIONS OF ANY # KIND, either express or implied.", "= response[\"data\"] if self.kv_engine_version == 1 else response[\"data\"][\"data\"] return return_data", "``ldap`` and ``userpass`` auth_type) :type username: str :param password: Password", "(for ``gcp`` auth_type) :type gcp_scopes: str \"\"\" def __init__( #", "# type: (str) -> Optional[str] \"\"\" Get Airflow Variable :param", "_get_secret(self, path_prefix, secret_id): # type: (str, str) -> Optional[dict] \"\"\"", "= username self.password = password self.role_id = role_id self.kubernetes_role =", "addressed. 
:type url: str :param auth_type: Authentication Type for Vault", "response.get(\"value\") if response else None def _get_secret(self, path_prefix, secret_id): #", "\"airflow\" } For example, if your keys are under ``connections``", "str :param gcp_scopes: Comma-separated string containing GCP scopes (for ``gcp``", "\"ldap\": _client.auth.ldap.login( username=self.username, password=self.password) elif self.auth_type == \"userpass\": _client.auth_userpass(username=self.username, password=self.password)", "auth_type) :type kubernetes_role: str :param kubernetes_jwt_path: Path for kubernetes jwt", "gcp_scopes @cached_property def client(self): # type: () -> hvac.Client \"\"\"", "= get_credentials_and_project_id(key_path=self.gcp_key_path, scopes=scopes) _client.auth.gcp.configure(credentials=credentials) else: raise AirflowException(\"Authentication type '{}' not", "kubernetes_jwt_path: Path for kubernetes jwt token (for ``kubernetes`` auth_type, deafult:", "variables_path: Specifies the path of the secret to read to", "else: response = self._get_secret(self.variables_path, key) return response.get(\"value\") if response else", "airflow.contrib.utils.gcp_credentials_provider import ( get_credentials_and_project_id, _get_scopes ) scopes = _get_scopes(self.gcp_scopes) credentials,", "get Secret :type path_prefix: str :param secret_id: Secret Key :type", "this file # to you under the Apache License, Version", "{ \"connections_path\": \"connections\", \"url\": \"http://127.0.0.1:8200\", \"mount_point\": \"airflow\" } For example,", "under ``connections`` path in ``airflow`` mount_point, this would be accessible", "\"connections\"}`` and request conn_id ``smtp_default``. :param connections_path: Specifies the path", ":param token: Authentication token to include in requests sent to", "you provide ``{\"connections_path\": \"connections\"}`` and request conn_id ``smtp_default``. :param connections_path:", "copyright ownership. The ASF licenses this file # to you", "the secret to read Airflow Configurations (default: 'configs'). 
:type config_path:", "auth_type) :type gcp_key_path: str :param gcp_scopes: Comma-separated string containing GCP", ":type role_id: str :param kubernetes_role: Role for Authentication (for ``kubernetes``", "secret_id: Secret Key :type secret_id: str \"\"\" secret_path = self.build_path(path_prefix,", "Get Airflow Configuration :param key: Configuration Option Key :type key:", "if variables_path != None: self.variables_path = variables_path.rstrip('/') else: self.variables_path =", "self.auth_type = auth_type self.kwargs = kwargs self.token = token self.username", "supported\".format(self.auth_type)) if _client.is_authenticated(): return _client else: raise VaultError(\"Vault Authentication Error!\")", "Vault \"\"\" from typing import Optional import hvac from cached_property", "def _get_secret(self, path_prefix, secret_id): # type: (str, str) -> Optional[dict]", "secret_id, secret_path) return None return_data = response[\"data\"] if self.kv_engine_version ==", "in writing, # software distributed under the License is distributed", "airflow.secrets import BaseSecretsBackend from airflow.utils.log.logging_mixin import LoggingMixin class VaultBackend(BaseSecretsBackend, LoggingMixin):", "username=None, # type: Optional[str] password=<PASSWORD>, # type: Optional[str] role_id=None, #", "(for ``kubernetes`` auth_type, deafult: ``/var/run/secrets/kubernetes.io/serviceaccount/token``) :type kubernetes_jwt_path: str :param secret_id:", "Vault Configurable via ``airflow.cfg`` as follows: .. code-block:: ini [secrets]", "1: response = self.client.secrets.kv.v1.read_secret( path=secret_path, mount_point=self.mount_point ) else: response =", ":param path_prefix: Prefix for the Path to get Secret :type", "\"url\": \"http://127.0.0.1:8200\", \"mount_point\": \"airflow\" } For example, if your keys", "\"\"\" response = self._get_secret(self.connections_path, conn_id) return response.get(\"conn_uri\") if response else", "'gcp', 'kubernetes'). Default is ``token``. :type auth_type: str :param mount_point:", ":type gcp_scopes: str \"\"\" def __init__( # pylint: disable=too-many-arguments self,", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", ":type secret_id: str \"\"\" secret_path = self.build_path(path_prefix, secret_id) try: if", "License is distributed on an # \"AS IS\" BASIS, WITHOUT", "= hvac.Client(url=self.url, **self.kwargs) if self.auth_type == \"token\": if not self.token:", "-> Optional[str] \"\"\" Get Airflow Variable :param key: Variable Key", ":type path_prefix: str :param secret_id: Secret Key :type secret_id: str", "(for ``gcp`` auth_type) :type gcp_key_path: str :param gcp_scopes: Comma-separated string", "conn_id: str \"\"\" response = self._get_secret(self.connections_path, conn_id) return response.get(\"conn_uri\") if", "to include in requests sent to Vault. (for ``token`` and", "get Variables. (default: 'variables') :type variables_path: str :param config_path: Specifies", "(str, str) -> Optional[dict] \"\"\" Get secret value from Vault.", "Key :type secret_id: str \"\"\" secret_path = self.build_path(path_prefix, secret_id) try:", ":param connections_path: Specifies the path of the secret to read", ":type mount_point: str :param token: Authentication token to include in", "of the secret to read Airflow Configurations (default: 'configs'). 
:type", "None for auth_type='token'\") _client.token = self.token elif self.auth_type == \"ldap\":", "secret_id=self.secret_id) elif self.auth_type == \"kubernetes\": if not self.kubernetes_role: raise VaultError(\"kubernetes_role", "file (for ``gcp`` auth_type) :type gcp_key_path: str :param gcp_scopes: Comma-separated", "import BaseSecretsBackend from airflow.utils.log.logging_mixin import LoggingMixin class VaultBackend(BaseSecretsBackend, LoggingMixin): \"\"\"", "Retrieves Connections and Variables from Hashicorp Vault Configurable via ``airflow.cfg``", "( get_credentials_and_project_id, _get_scopes ) scopes = _get_scopes(self.gcp_scopes) credentials, _ =", "secret_id: str \"\"\" secret_path = self.build_path(path_prefix, secret_id) try: if self.kv_engine_version", "``kubernetes`` auth_type) :type kubernetes_role: str :param kubernetes_jwt_path: Path for kubernetes", "# type: str mount_point='secret', # type: str kv_engine_version=2, # type:", "\"\"\" _client = hvac.Client(url=self.url, **self.kwargs) if self.auth_type == \"token\": if", "# \"License\"); you may not use this file except in", "was mounted on. (Default: ``secret``) :type mount_point: str :param token:", "variables from Hashicorp Vault \"\"\" from typing import Optional import", "VaultError(\"Vault Authentication Error!\") def get_conn_uri(self, conn_id): # type: (str) ->", "Error!\") def get_conn_uri(self, conn_id): # type: (str) -> Optional[str] \"\"\"", "# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY", ":type kv_engine_version: int :param username: Username for Authentication (for ``ldap``", "-> Optional[str] \"\"\" Get secret value from Vault. Store the", "to the Apache Software Foundation (ASF) under one # or", "\"License\"); you may not use this file except in compliance", "None: self.variables_path = variables_path.rstrip('/') else: self.variables_path = variables_path self.config_path =", "``token``. :type auth_type: str :param mount_point: The \"path\" the secret", "Hashicorp Vault Configurable via ``airflow.cfg`` as follows: .. code-block:: ini", "connections_path.rstrip('/') if variables_path != None: self.variables_path = variables_path.rstrip('/') else: self.variables_path", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "type: Optional[str] gcp_key_path=None, # type: Optional[str] gcp_scopes=None, # type: Optional[str]", "def get_variable(self, key): # type: (str) -> Optional[str] \"\"\" Get", "to get Secret :type path_prefix: str :param secret_id: Secret Key", "Configuration Option Key :type key: str :rtype: str :return: Configuration", "Optional[str] gcp_scopes=None, # type: Optional[str] **kwargs ): super(VaultBackend, self).__init__() self.connections_path", "(for ``ldap`` and ``userpass`` auth_type) :type password: str :param role_id:", "path=secret_path, mount_point=self.mount_point ) else: response = self.client.secrets.kv.v2.read_secret_version( path=secret_path, mount_point=self.mount_point) except", "): super(VaultBackend, self).__init__() self.connections_path = connections_path.rstrip('/') if variables_path != None:", ":param kv_engine_version: Select the version of the engine to run", "type: Optional[str] **kwargs ): super(VaultBackend, self).__init__() self.connections_path = connections_path.rstrip('/') if", "and limitations # under the License. 
\"\"\" Objects relating to", "Objects relating to sourcing connections & variables from Hashicorp Vault", "auth_type='kubernetes'\") with open(self.kubernetes_jwt_path) as f: jwt = f.read() _client.auth_kubernetes(role=self.kubernetes_role, jwt=jwt)", "# distributed with this work for additional information # regarding", "None else: response = self._get_secret(self.variables_path, key) return response.get(\"value\") if response", "key): # type: (str) -> Optional[str] \"\"\" Get Airflow Configuration", "writing, # software distributed under the License is distributed on", "Connections. (default: 'connections') :type connections_path: str :param variables_path: Specifies the", ":type auth_type: str :param mount_point: The \"path\" the secret engine", "url self.auth_type = auth_type self.kwargs = kwargs self.token = token", "from Hashicorp Vault Configurable via ``airflow.cfg`` as follows: .. code-block::", "else response[\"data\"][\"data\"] return return_data def get_config(self, key): # type: (str)", "key: Variable Key :return: Variable Value \"\"\" if self.variables_path ==", "type: Optional[str] username=None, # type: Optional[str] password=<PASSWORD>, # type: Optional[str]", "(for ``ldap`` and ``userpass`` auth_type) :type username: str :param password:", "self.token: raise VaultError(\"token cannot be None for auth_type='token'\") _client.token =", "= self._get_secret(self.variables_path, key) return response.get(\"value\") if response else None def", "InvalidPath: self.log.info(\"Secret %s not found in Path: %s\", secret_id, secret_path)", ":param kubernetes_role: Role for Authentication (for ``kubernetes`` auth_type) :type kubernetes_role:", ":type kubernetes_jwt_path: str :param secret_id: Secret ID for Authentication (for", "self.role_id = role_id self.kubernetes_role = kubernetes_role self.kubernetes_jwt_path = kubernetes_jwt_path self.secret_id", "CONDITIONS OF ANY # KIND, either express or implied. See", "default: ``2``) :type kv_engine_version: int :param username: Username for Authentication", "and ``github`` auth_type) :type token: str :param kv_engine_version: Select the", "``approle`` auth_type) :type role_id: str :param kubernetes_role: Role for Authentication", "self).__init__() self.connections_path = connections_path.rstrip('/') if variables_path != None: self.variables_path =", "secret to read to get Connections. (default: 'connections') :type connections_path:", "the secret engine was mounted on. (Default: ``secret``) :type mount_point:", "self.log.info(\"Secret %s not found in Path: %s\", secret_id, secret_path) return", "# type: () -> hvac.Client \"\"\" Return an authenticated Hashicorp", "# type: str variables_path='variables', # type: str config_path='config', # type:", "mount_point=self.mount_point ) else: response = self.client.secrets.kv.v2.read_secret_version( path=secret_path, mount_point=self.mount_point) except InvalidPath:", "secret to read Airflow Configurations (default: 'configs'). :type config_path: str", "get Connections. (default: 'connections') :type connections_path: str :param variables_path: Specifies", "url: Base URL for the Vault instance being addressed. :type", "= f.read() _client.auth_kubernetes(role=self.kubernetes_role, jwt=jwt) elif self.auth_type == \"github\": _client.auth.github.login(token=self.token) elif", "Default is ``token``. 
:type auth_type: str :param mount_point: The \"path\"", "from airflow.secrets import BaseSecretsBackend from airflow.utils.log.logging_mixin import LoggingMixin class VaultBackend(BaseSecretsBackend,", "of the engine to run (``1`` or ``2``, default: ``2``)", "VaultBackend(BaseSecretsBackend, LoggingMixin): \"\"\" Retrieves Connections and Variables from Hashicorp Vault", "for additional information # regarding copyright ownership. The ASF licenses", "[secrets] backend = airflow.contrib.secrets.hashicorp_vault.VaultBackend backend_kwargs = { \"connections_path\": \"connections\", \"url\":", "the Apache Software Foundation (ASF) under one # or more", "get_config(self, key): # type: (str) -> Optional[str] \"\"\" Get Airflow", "Optional[str] kubernetes_role=None, # type: Optional[str] kubernetes_jwt_path='/var/run/secrets/kubernetes.io/serviceaccount/token', # type: str secret_id=None,", "str :param token: Authentication token to include in requests sent", "# # Unless required by applicable law or agreed to", "\"\"\" response = self._get_secret(self.config_path, key) return response.get(\"value\") if response else", "Version 2.0 (the # \"License\"); you may not use this", "else: raise AirflowException(\"Authentication type '{}' not supported\".format(self.auth_type)) if _client.is_authenticated(): return", "auth_type: Authentication Type for Vault (one of 'token', 'ldap', 'userpass',", "scopes=scopes) _client.auth.gcp.configure(credentials=credentials) else: raise AirflowException(\"Authentication type '{}' not supported\".format(self.auth_type)) if", "\"path\" the secret engine was mounted on. (Default: ``secret``) :type", "\"approle\": _client.auth_approle(role_id=self.role_id, secret_id=self.secret_id) elif self.auth_type == \"kubernetes\": if not self.kubernetes_role:", "one # or more contributor license agreements. See the NOTICE", "as follows: .. code-block:: ini [secrets] backend = airflow.contrib.secrets.hashicorp_vault.VaultBackend backend_kwargs", ":type kubernetes_role: str :param kubernetes_jwt_path: Path for kubernetes jwt token", "\"\"\" Objects relating to sourcing connections & variables from Hashicorp", "\"connections_path\": \"connections\", \"url\": \"http://127.0.0.1:8200\", \"mount_point\": \"airflow\" } For example, if", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "to get Connections. (default: 'connections') :type connections_path: str :param variables_path:", "cannot be None for auth_type='kubernetes'\") with open(self.kubernetes_jwt_path) as f: jwt", ") else: response = self.client.secrets.kv.v2.read_secret_version( path=secret_path, mount_point=self.mount_point) except InvalidPath: self.log.info(\"Secret", "Optional[str] **kwargs ): super(VaultBackend, self).__init__() self.connections_path = connections_path.rstrip('/') if variables_path", "return_data = response[\"data\"] if self.kv_engine_version == 1 else response[\"data\"][\"data\"] return", "(str) -> Optional[str] \"\"\" Get Airflow Variable :param key: Variable", "via ``airflow.cfg`` as follows: .. 
code-block:: ini [secrets] backend =", "str :param mount_point: The \"path\" the secret engine was mounted", "str url=None, # type: Optional[str] auth_type='token', # type: str mount_point='secret',", "= secret_id self.mount_point = mount_point self.kv_engine_version = kv_engine_version self.gcp_key_path =", "not supported\".format(self.auth_type)) if _client.is_authenticated(): return _client else: raise VaultError(\"Vault Authentication", "conn_id) return response.get(\"conn_uri\") if response else None def get_variable(self, key):", "self.mount_point = mount_point self.kv_engine_version = kv_engine_version self.gcp_key_path = gcp_key_path self.gcp_scopes", "get_variable(self, key): # type: (str) -> Optional[str] \"\"\" Get Airflow", "except in compliance # with the License. You may obtain", "else None def _get_secret(self, path_prefix, secret_id): # type: (str, str)", "self.variables_path = variables_path.rstrip('/') else: self.variables_path = variables_path self.config_path = config_path.rstrip('/')", "to get Variables. (default: 'variables') :type variables_path: str :param config_path:", "\"userpass\": _client.auth_userpass(username=self.username, password=self.password) elif self.auth_type == \"approle\": _client.auth_approle(role_id=self.role_id, secret_id=self.secret_id) elif", "# under the License. \"\"\" Objects relating to sourcing connections", "(for ``kubernetes`` auth_type) :type kubernetes_role: str :param kubernetes_jwt_path: Path for", ":param kubernetes_jwt_path: Path for kubernetes jwt token (for ``kubernetes`` auth_type,", "self.auth_type == \"kubernetes\": if not self.kubernetes_role: raise VaultError(\"kubernetes_role cannot be", "NOTICE file # distributed with this work for additional information", "= config_path.rstrip('/') self.url = url self.auth_type = auth_type self.kwargs =", "to read Airflow Configurations (default: 'configs'). :type config_path: str :param", "\"github\": _client.auth.github.login(token=self.token) elif self.auth_type == \"gcp\": from airflow.contrib.utils.gcp_credentials_provider import (", "of URI :param conn_id: connection id :type conn_id: str \"\"\"", "this file except in compliance # with the License. You", "for kubernetes jwt token (for ``kubernetes`` auth_type, deafult: ``/var/run/secrets/kubernetes.io/serviceaccount/token``) :type", "run (``1`` or ``2``, default: ``2``) :type kv_engine_version: int :param", "(for ``token`` and ``github`` auth_type) :type token: str :param kv_engine_version:", "None: return None else: response = self._get_secret(self.variables_path, key) return response.get(\"value\")", "credentials, _ = get_credentials_and_project_id(key_path=self.gcp_key_path, scopes=scopes) _client.auth.gcp.configure(credentials=credentials) else: raise AirflowException(\"Authentication type", "kwargs self.token = token self.username = username self.password = password", "== \"gcp\": from airflow.contrib.utils.gcp_credentials_provider import ( get_credentials_and_project_id, _get_scopes ) scopes", "license agreements. 
See the NOTICE file # distributed with this", ":param username: Username for Authentication (for ``ldap`` and ``userpass`` auth_type)", ":type token: str :param kv_engine_version: Select the version of the", "required by applicable law or agreed to in writing, #", "self.connections_path = connections_path.rstrip('/') if variables_path != None: self.variables_path = variables_path.rstrip('/')", "if self.variables_path == None: return None else: response = self._get_secret(self.variables_path,", "try: if self.kv_engine_version == 1: response = self.client.secrets.kv.v1.read_secret( path=secret_path, mount_point=self.mount_point", "``token`` and ``github`` auth_type) :type token: str :param kv_engine_version: Select", "to read to get Connections. (default: 'connections') :type connections_path: str", "if self.kv_engine_version == 1 else response[\"data\"][\"data\"] return return_data def get_config(self,", "the License for the # specific language governing permissions and", "auth_type) :type gcp_scopes: str \"\"\" def __init__( # pylint: disable=too-many-arguments", "BaseSecretsBackend from airflow.utils.log.logging_mixin import LoggingMixin class VaultBackend(BaseSecretsBackend, LoggingMixin): \"\"\" Retrieves", "relating to sourcing connections & variables from Hashicorp Vault \"\"\"", "to read to get Variables. (default: 'variables') :type variables_path: str", "== 1: response = self.client.secrets.kv.v1.read_secret( path=secret_path, mount_point=self.mount_point ) else: response", ":param variables_path: Specifies the path of the secret to read", "the engine to run (``1`` or ``2``, default: ``2``) :type", "ANY # KIND, either express or implied. See the License", "import LoggingMixin class VaultBackend(BaseSecretsBackend, LoggingMixin): \"\"\" Retrieves Connections and Variables", "variables_path != None: self.variables_path = variables_path.rstrip('/') else: self.variables_path = variables_path", "the License is distributed on an # \"AS IS\" BASIS,", "'token', 'ldap', 'userpass', 'approle', 'github', 'gcp', 'kubernetes'). Default is ``token``.", "(default: 'connections') :type connections_path: str :param variables_path: Specifies the path", "'userpass', 'approle', 'github', 'gcp', 'kubernetes'). Default is ``token``. :type auth_type:", "``ldap`` and ``userpass`` auth_type) :type password: str :param role_id: Role", "response = self.client.secrets.kv.v1.read_secret( path=secret_path, mount_point=self.mount_point ) else: response = self.client.secrets.kv.v2.read_secret_version(", "from typing import Optional import hvac from cached_property import cached_property", "Optional[str] \"\"\" Get Airflow Configuration :param key: Configuration Option Key", "= variables_path self.config_path = config_path.rstrip('/') self.url = url self.auth_type =", "# # Licensed to the Apache Software Foundation (ASF) under", "= kv_engine_version self.gcp_key_path = gcp_key_path self.gcp_scopes = gcp_scopes @cached_property def", "str variables_path='variables', # type: str config_path='config', # type: str url=None,", "self.auth_type == \"userpass\": _client.auth_userpass(username=self.username, password=self.password) elif self.auth_type == \"approle\": _client.auth_approle(role_id=self.role_id,", "secret engine was mounted on. (Default: ``secret``) :type mount_point: str", "``gcp`` auth_type) :type gcp_scopes: str \"\"\" def __init__( # pylint:", "path=secret_path, mount_point=self.mount_point) except InvalidPath: self.log.info(\"Secret %s not found in Path:", "Vault. 
(for ``token`` and ``github`` auth_type) :type token: str :param", "jwt token (for ``kubernetes`` auth_type, deafult: ``/var/run/secrets/kubernetes.io/serviceaccount/token``) :type kubernetes_jwt_path: str", "not use this file except in compliance # with the", "str :param config_path: Specifies the path of the secret to", "scopes (for ``gcp`` auth_type) :type gcp_scopes: str \"\"\" def __init__(", "on. (Default: ``secret``) :type mount_point: str :param token: Authentication token", "self.variables_path = variables_path self.config_path = config_path.rstrip('/') self.url = url self.auth_type", "(``1`` or ``2``, default: ``2``) :type kv_engine_version: int :param username:", "Variable :param key: Variable Key :return: Variable Value \"\"\" if", "role_id=None, # type: Optional[str] kubernetes_role=None, # type: Optional[str] kubernetes_jwt_path='/var/run/secrets/kubernetes.io/serviceaccount/token', #", "Unless required by applicable law or agreed to in writing,", "password=<PASSWORD>, # type: Optional[str] role_id=None, # type: Optional[str] kubernetes_role=None, #", "auth_type='token'\") _client.token = self.token elif self.auth_type == \"ldap\": _client.auth.ldap.login( username=self.username,", "\"token\": if not self.token: raise VaultError(\"token cannot be None for", "None for auth_type='kubernetes'\") with open(self.kubernetes_jwt_path) as f: jwt = f.read()", "hvac.Client(url=self.url, **self.kwargs) if self.auth_type == \"token\": if not self.token: raise", "Authentication (for ``approle`` auth_type) :type secret_id: str :param gcp_key_path: Path", "(ASF) under one # or more contributor license agreements. See", "= auth_type self.kwargs = kwargs self.token = token self.username =", "Value \"\"\" if self.variables_path == None: return None else: response", "# type: Optional[str] kubernetes_jwt_path='/var/run/secrets/kubernetes.io/serviceaccount/token', # type: str secret_id=None, # type:", "Variables from Hashicorp Vault Configurable via ``airflow.cfg`` as follows: ..", "-> Optional[dict] \"\"\" Get secret value from Vault. :param path_prefix:", "str \"\"\" secret_path = self.build_path(path_prefix, secret_id) try: if self.kv_engine_version ==", "# or more contributor license agreements. 
See the NOTICE file", "self.auth_type == \"approle\": _client.auth_approle(role_id=self.role_id, secret_id=self.secret_id) elif self.auth_type == \"kubernetes\": if", "agreed to in writing, # software distributed under the License", "gcp_key_path: str :param gcp_scopes: Comma-separated string containing GCP scopes (for", "if not self.token: raise VaultError(\"token cannot be None for auth_type='token'\")", "\"\"\" secret_path = self.build_path(path_prefix, secret_id) try: if self.kv_engine_version == 1:", "_client.token = self.token elif self.auth_type == \"ldap\": _client.auth.ldap.login( username=self.username, password=self.password)", "type: Optional[str] password=<PASSWORD>, # type: Optional[str] role_id=None, # type: Optional[str]", "kubernetes jwt token (for ``kubernetes`` auth_type, deafult: ``/var/run/secrets/kubernetes.io/serviceaccount/token``) :type kubernetes_jwt_path:", "Option Key :type key: str :rtype: str :return: Configuration Option", "== \"ldap\": _client.auth.ldap.login( username=self.username, password=self.password) elif self.auth_type == \"userpass\": _client.auth_userpass(username=self.username,", "== \"approle\": _client.auth_approle(role_id=self.role_id, secret_id=self.secret_id) elif self.auth_type == \"kubernetes\": if not", "_client.auth.github.login(token=self.token) elif self.auth_type == \"gcp\": from airflow.contrib.utils.gcp_credentials_provider import ( get_credentials_and_project_id,", "The \"path\" the secret engine was mounted on. (Default: ``secret``)", "ID for Authentication (for ``approle`` auth_type) :type secret_id: str :param", "self.variables_path == None: return None else: response = self._get_secret(self.variables_path, key)", "self.password = password self.role_id = role_id self.kubernetes_role = kubernetes_role self.kubernetes_jwt_path", "= kubernetes_role self.kubernetes_jwt_path = kubernetes_jwt_path self.secret_id = secret_id self.mount_point =", "JSON file (for ``gcp`` auth_type) :type gcp_key_path: str :param gcp_scopes:", "'github', 'gcp', 'kubernetes'). Default is ``token``. :type auth_type: str :param", "password self.role_id = role_id self.kubernetes_role = kubernetes_role self.kubernetes_jwt_path = kubernetes_jwt_path", "self.build_path(path_prefix, secret_id) try: if self.kv_engine_version == 1: response = self.client.secrets.kv.v1.read_secret(", "secret value from Vault. Store the secret in the form", ":param key: Configuration Option Key :type key: str :rtype: str", "auth_type, deafult: ``/var/run/secrets/kubernetes.io/serviceaccount/token``) :type kubernetes_jwt_path: str :param secret_id: Secret ID", "= variables_path.rstrip('/') else: self.variables_path = variables_path self.config_path = config_path.rstrip('/') self.url", "(the # \"License\"); you may not use this file except", "deafult: ``/var/run/secrets/kubernetes.io/serviceaccount/token``) :type kubernetes_jwt_path: str :param secret_id: Secret ID for", "auth_type) :type username: str :param password: Password for Authentication (for", "if your keys are under ``connections`` path in ``airflow`` mount_point,", "ID for Authentication (for ``approle`` auth_type) :type role_id: str :param", "Base URL for the Vault instance being addressed. :type url:", "'kubernetes'). Default is ``token``. :type auth_type: str :param mount_point: The", ":type key: str :rtype: str :return: Configuration Option Value retrieved", "ASF licenses this file # to you under the Apache", "to Vault. 
(for ``token`` and ``github`` auth_type) :type token: str", "self.kubernetes_jwt_path = kubernetes_jwt_path self.secret_id = secret_id self.mount_point = mount_point self.kv_engine_version", "Store the secret in the form of URI :param conn_id:", "Configuration Option Value retrieved from the vault \"\"\" response =", "airflow.exceptions import AirflowException from airflow.secrets import BaseSecretsBackend from airflow.utils.log.logging_mixin import", "of the secret to read to get Connections. (default: 'connections')", "response = self._get_secret(self.config_path, key) return response.get(\"value\") if response else None", "on an # \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS", "response[\"data\"] if self.kv_engine_version == 1 else response[\"data\"][\"data\"] return return_data def", "# pylint: disable=too-many-arguments self, connections_path='connections', # type: str variables_path='variables', #", "str :return: Configuration Option Value retrieved from the vault \"\"\"", "sourcing connections & variables from Hashicorp Vault \"\"\" from typing", "ownership. The ASF licenses this file # to you under", "retrieved from the vault \"\"\" response = self._get_secret(self.config_path, key) return", "mount_point self.kv_engine_version = kv_engine_version self.gcp_key_path = gcp_key_path self.gcp_scopes = gcp_scopes", "``gcp`` auth_type) :type gcp_key_path: str :param gcp_scopes: Comma-separated string containing", "str :param auth_type: Authentication Type for Vault (one of 'token',", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "_client.auth_approle(role_id=self.role_id, secret_id=self.secret_id) elif self.auth_type == \"kubernetes\": if not self.kubernetes_role: raise", "the Path to get Secret :type path_prefix: str :param secret_id:", "secret_id) try: if self.kv_engine_version == 1: response = self.client.secrets.kv.v1.read_secret( path=secret_path,", "with the License. You may obtain a copy of the", "connections_path: Specifies the path of the secret to read to", "import AirflowException from airflow.secrets import BaseSecretsBackend from airflow.utils.log.logging_mixin import LoggingMixin", "``2``) :type kv_engine_version: int :param username: Username for Authentication (for", "kv_engine_version: int :param username: Username for Authentication (for ``ldap`` and", "response[\"data\"][\"data\"] return return_data def get_config(self, key): # type: (str) ->", "applicable law or agreed to in writing, # software distributed", "string containing GCP scopes (for ``gcp`` auth_type) :type gcp_scopes: str", "``2``, default: ``2``) :type kv_engine_version: int :param username: Username for", "token: str :param kv_engine_version: Select the version of the engine", "key: Configuration Option Key :type key: str :rtype: str :return:", "@cached_property def client(self): # type: () -> hvac.Client \"\"\" Return", "URI :param conn_id: connection id :type conn_id: str \"\"\" response", "secret_path) return None return_data = response[\"data\"] if self.kv_engine_version == 1", "config_path: str :param url: Base URL for the Vault instance", "Authentication token to include in requests sent to Vault. 
(for", "str :param kubernetes_role: Role for Authentication (for ``kubernetes`` auth_type) :type", "Username for Authentication (for ``ldap`` and ``userpass`` auth_type) :type username:", "= { \"connections_path\": \"connections\", \"url\": \"http://127.0.0.1:8200\", \"mount_point\": \"airflow\" } For", "type: str variables_path='variables', # type: str config_path='config', # type: str", "type: Optional[str] role_id=None, # type: Optional[str] kubernetes_role=None, # type: Optional[str]", "kubernetes_jwt_path='/var/run/secrets/kubernetes.io/serviceaccount/token', # type: str secret_id=None, # type: Optional[str] gcp_key_path=None, #", "self._get_secret(self.variables_path, key) return response.get(\"value\") if response else None def _get_secret(self,", "is distributed on an # \"AS IS\" BASIS, WITHOUT WARRANTIES", "file # to you under the Apache License, Version 2.0", "# with the License. You may obtain a copy of", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "containing GCP scopes (for ``gcp`` auth_type) :type gcp_scopes: str \"\"\"", "= self._get_secret(self.connections_path, conn_id) return response.get(\"conn_uri\") if response else None def", "int :param username: Username for Authentication (for ``ldap`` and ``userpass``", "in requests sent to Vault. (for ``token`` and ``github`` auth_type)", "to sourcing connections & variables from Hashicorp Vault \"\"\" from", "gcp_key_path=None, # type: Optional[str] gcp_scopes=None, # type: Optional[str] **kwargs ):", "kv_engine_version=2, # type: int token=None, # type: Optional[str] username=None, #", "config_path='config', # type: str url=None, # type: Optional[str] auth_type='token', #", "software distributed under the License is distributed on an #", "Licensed to the Apache Software Foundation (ASF) under one #", ":param gcp_key_path: Path to GCP Credential JSON file (for ``gcp``", ":param role_id: Role ID for Authentication (for ``approle`` auth_type) :type", "Variable Key :return: Variable Value \"\"\" if self.variables_path == None:", "Optional[str] role_id=None, # type: Optional[str] kubernetes_role=None, # type: Optional[str] kubernetes_jwt_path='/var/run/secrets/kubernetes.io/serviceaccount/token',", "under one # or more contributor license agreements. See the", "self._get_secret(self.connections_path, conn_id) return response.get(\"conn_uri\") if response else None def get_variable(self,", "is ``token``. :type auth_type: str :param mount_point: The \"path\" the", "the path of the secret to read to get Connections.", "Value retrieved from the vault \"\"\" response = self._get_secret(self.config_path, key)", "Key :return: Variable Value \"\"\" if self.variables_path == None: return", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "License. \"\"\" Objects relating to sourcing connections & variables from", "# type: Optional[str] username=None, # type: Optional[str] password=<PASSWORD>, # type:", "# type: (str, str) -> Optional[dict] \"\"\" Get secret value", "\"connections\", \"url\": \"http://127.0.0.1:8200\", \"mount_point\": \"airflow\" } For example, if your", "import Optional import hvac from cached_property import cached_property from hvac.exceptions", "return_data def get_config(self, key): # type: (str) -> Optional[str] \"\"\"", "Option Value retrieved from the vault \"\"\" response = self._get_secret(self.config_path,", "information # regarding copyright ownership. 
The ASF licenses this file", "if response else None def _get_secret(self, path_prefix, secret_id): # type:", "from Vault. Store the secret in the form of URI", "\"mount_point\": \"airflow\" } For example, if your keys are under", "the Apache License, Version 2.0 (the # \"License\"); you may", "gcp_key_path: Path to GCP Credential JSON file (for ``gcp`` auth_type)", "path of the secret to read to get Variables. (default:", "(one of 'token', 'ldap', 'userpass', 'approle', 'github', 'gcp', 'kubernetes'). Default", "Vault. Store the secret in the form of URI :param", "_client.auth.gcp.configure(credentials=credentials) else: raise AirflowException(\"Authentication type '{}' not supported\".format(self.auth_type)) if _client.is_authenticated():", "you under the Apache License, Version 2.0 (the # \"License\");", "Airflow Configurations (default: 'configs'). :type config_path: str :param url: Base", "# KIND, either express or implied. See the License for", "username: str :param password: Password for Authentication (for ``ldap`` and", ":type secret_id: str :param gcp_key_path: Path to GCP Credential JSON", "be None for auth_type='token'\") _client.token = self.token elif self.auth_type ==", "_client.auth_userpass(username=self.username, password=self.password) elif self.auth_type == \"approle\": _client.auth_approle(role_id=self.role_id, secret_id=self.secret_id) elif self.auth_type", "elif self.auth_type == \"ldap\": _client.auth.ldap.login( username=self.username, password=self.password) elif self.auth_type ==", "for auth_type='kubernetes'\") with open(self.kubernetes_jwt_path) as f: jwt = f.read() _client.auth_kubernetes(role=self.kubernetes_role,", "GCP scopes (for ``gcp`` auth_type) :type gcp_scopes: str \"\"\" def", "as f: jwt = f.read() _client.auth_kubernetes(role=self.kubernetes_role, jwt=jwt) elif self.auth_type ==", "**self.kwargs) if self.auth_type == \"token\": if not self.token: raise VaultError(\"token", "Configuration :param key: Configuration Option Key :type key: str :rtype:", "under the License. \"\"\" Objects relating to sourcing connections &", "the secret to read to get Connections. (default: 'connections') :type", "Hashicorp Vault client \"\"\" _client = hvac.Client(url=self.url, **self.kwargs) if self.auth_type", "secret_path = self.build_path(path_prefix, secret_id) try: if self.kv_engine_version == 1: response", "agreements. See the NOTICE file # distributed with this work", "\"gcp\": from airflow.contrib.utils.gcp_credentials_provider import ( get_credentials_and_project_id, _get_scopes ) scopes =", "from Vault. :param path_prefix: Prefix for the Path to get", "response = self._get_secret(self.connections_path, conn_id) return response.get(\"conn_uri\") if response else None", "licenses this file # to you under the Apache License,", "# type: str url=None, # type: Optional[str] auth_type='token', # type:", "return _client else: raise VaultError(\"Vault Authentication Error!\") def get_conn_uri(self, conn_id):", "for the Vault instance being addressed. :type url: str :param", "role_id: Role ID for Authentication (for ``approle`` auth_type) :type role_id:", "governing permissions and limitations # under the License. 
\"\"\" Objects", "type: Optional[str] auth_type='token', # type: str mount_point='secret', # type: str", "by applicable law or agreed to in writing, # software", "# Unless required by applicable law or agreed to in", "hvac.exceptions import InvalidPath, VaultError from airflow.exceptions import AirflowException from airflow.secrets", "the form of URI :param conn_id: connection id :type conn_id:", "being addressed. :type url: str :param auth_type: Authentication Type for", "Optional[str] \"\"\" Get secret value from Vault. Store the secret", "Path for kubernetes jwt token (for ``kubernetes`` auth_type, deafult: ``/var/run/secrets/kubernetes.io/serviceaccount/token``)", "self.auth_type == \"ldap\": _client.auth.ldap.login( username=self.username, password=self.password) elif self.auth_type == \"userpass\":", "Configurations (default: 'configs'). :type config_path: str :param url: Base URL", "GCP Credential JSON file (for ``gcp`` auth_type) :type gcp_key_path: str", "IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND,", "from airflow.exceptions import AirflowException from airflow.secrets import BaseSecretsBackend from airflow.utils.log.logging_mixin", "License. You may obtain a copy of the License at", "self.token elif self.auth_type == \"ldap\": _client.auth.ldap.login( username=self.username, password=self.password) elif self.auth_type", "You may obtain a copy of the License at #", "be None for auth_type='kubernetes'\") with open(self.kubernetes_jwt_path) as f: jwt =", "``secret``) :type mount_point: str :param token: Authentication token to include", "version of the engine to run (``1`` or ``2``, default:", "URL for the Vault instance being addressed. :type url: str", "kubernetes_jwt_path self.secret_id = secret_id self.mount_point = mount_point self.kv_engine_version = kv_engine_version", "self.auth_type == \"token\": if not self.token: raise VaultError(\"token cannot be", "Optional[str] auth_type='token', # type: str mount_point='secret', # type: str kv_engine_version=2,", "& variables from Hashicorp Vault \"\"\" from typing import Optional", "Credential JSON file (for ``gcp`` auth_type) :type gcp_key_path: str :param", "== \"token\": if not self.token: raise VaultError(\"token cannot be None", "else: response = self.client.secrets.kv.v2.read_secret_version( path=secret_path, mount_point=self.mount_point) except InvalidPath: self.log.info(\"Secret %s", "Connections and Variables from Hashicorp Vault Configurable via ``airflow.cfg`` as", "config_path.rstrip('/') self.url = url self.auth_type = auth_type self.kwargs = kwargs", "backend_kwargs = { \"connections_path\": \"connections\", \"url\": \"http://127.0.0.1:8200\", \"mount_point\": \"airflow\" }", ":param gcp_scopes: Comma-separated string containing GCP scopes (for ``gcp`` auth_type)", "compliance # with the License. You may obtain a copy", "for Authentication (for ``approle`` auth_type) :type role_id: str :param kubernetes_role:", "# type: Optional[str] gcp_key_path=None, # type: Optional[str] gcp_scopes=None, # type:", "Variable Value \"\"\" if self.variables_path == None: return None else:", "not found in Path: %s\", secret_id, secret_path) return None return_data", "permissions and limitations # under the License. \"\"\" Objects relating", "self.kv_engine_version == 1: response = self.client.secrets.kv.v1.read_secret( path=secret_path, mount_point=self.mount_point ) else:", "read Airflow Configurations (default: 'configs'). :type config_path: str :param url:", "Variables. 
(default: 'variables') :type variables_path: str :param config_path: Specifies the", "def get_config(self, key): # type: (str) -> Optional[str] \"\"\" Get", "Comma-separated string containing GCP scopes (for ``gcp`` auth_type) :type gcp_scopes:", "str :param variables_path: Specifies the path of the secret to", "token (for ``kubernetes`` auth_type, deafult: ``/var/run/secrets/kubernetes.io/serviceaccount/token``) :type kubernetes_jwt_path: str :param", ":type conn_id: str \"\"\" response = self._get_secret(self.connections_path, conn_id) return response.get(\"conn_uri\")", "open(self.kubernetes_jwt_path) as f: jwt = f.read() _client.auth_kubernetes(role=self.kubernetes_role, jwt=jwt) elif self.auth_type", "would be accessible if you provide ``{\"connections_path\": \"connections\"}`` and request", "Authentication Error!\") def get_conn_uri(self, conn_id): # type: (str) -> Optional[str]", "'ldap', 'userpass', 'approle', 'github', 'gcp', 'kubernetes'). Default is ``token``. :type", "= self.client.secrets.kv.v1.read_secret( path=secret_path, mount_point=self.mount_point ) else: response = self.client.secrets.kv.v2.read_secret_version( path=secret_path,", "gcp_key_path self.gcp_scopes = gcp_scopes @cached_property def client(self): # type: ()", "cannot be None for auth_type='token'\") _client.token = self.token elif self.auth_type", ":type variables_path: str :param config_path: Specifies the path of the", "self.client.secrets.kv.v1.read_secret( path=secret_path, mount_point=self.mount_point ) else: response = self.client.secrets.kv.v2.read_secret_version( path=secret_path, mount_point=self.mount_point)" ]
[ "items to raw document representation. 2. Converts the collection of", "[x[1] for x in los1]) # Create bag of words", "elapsed > 60: if total > 3600: print(f'(+{elapsed/60:.2f}m|t:{total/3600:.2f}h) {msg}') else:", "between 0-1, with 1 being identical. \"\"\" if type(los1) ==", "from local file lap(\"Loading CSV data from local file...\") df", "Approach Comparison - Hamming Distance Vector Distance Metrics - Jaccard", "killmails by text similarity. Edit Distance Metrics - Levenshtein Distance", "los2]) # Create bag of words tfidf = TfidfVectorizer().fit_transform([doc1, doc2])", "short text items to raw document representation. 2. Converts the", "df['items'].apply(literal_eval) # Group DataFrame by character_id and compute distance series", "time start = time.time() total = 0 def lap(msg): \"\"\"Records", "doc2]) # Vectorize the bag of words cos_dist = linear_kernel(tfidf[0:1],", "in los1]) # Create bag of words doc2 = reduce(lambda", "for each group lap(\"Computing cosine distances and change in kd", "= [] items1 = gp['items'] items2 = gp['items'].shift() for i", "using TfidfVectorizer (combines vector counting and TF-IDF calculator). 3. Computes", "end=\"\\r\") lap(\"Concatenating resulting groups and writing to file...\") df_res =", "Distance Metrics - Jaccard Similarity - Cosine Distance Written By:", "lists. 1. Converts collection of long text items to raw", "y: f'{x} {y}', [x[1] for x in los1]) # Create", "# Compute cosine distance return cos_dist # Load CSV from", "value between 0-1, with 1 being identical and 0 being", "0: return 0 doc1 = reduce(lambda x, y: f'{x} {y}',", "groups.append(gp) # Record progress count += 1 print(f\"Progress {count/num_groups:2.1%}\", end=\"\\r\")", "complete different. \"\"\" if type(los1) == float or type(los2) ==", "(combines vector counting and TF-IDF calculator). 3. Computes cosine similarity", "of raw strings. 
Returns: cosine distance as a value between", "to avoid adding nan pair los1 = items1.iloc[i] los2 =", "- total total = time.time() - start if elapsed >", "reduce(lambda x, y: f'{x} {y}', [x[1] for x in los2])", "y: f'{x} {y}', [x[1] for x in los2]) # Create", "name, gp in groupby: # Order the observations and prepare", "= linear_kernel(tfidf[0:1], tfidf[1:2]).flatten()[0] # Compute cosine distance return cos_dist def", "range at 1 cos_dist_lt = [np.nan] # cosine distance b/w", "= (time.time() - start) - total total = time.time() -", "from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics.pairwise import linear_kernel def get_long_text_cosine_distance(los1,", "df = pd.read_csv(f'data/all_victims_complete_partialKD.csv', encoding='utf-8') df = df.drop(columns=['HighSlotISK', 'MidSlotISK', 'LowSlotISK', 'type',", "= [] # list to append modified group dataframes to", "print(f'(+{elapsed/3600:.2f}h|t:{total/3600:.2f}h) {msg}') elif elapsed > 60: if total > 3600:", "{y}', [x[1] for x in los2]) # Create bag of", "column value types...\") df['items'] = df['items'].apply(literal_eval) # Group DataFrame by", "Record progress count += 1 print(f\"Progress {count/num_groups:2.1%}\", end=\"\\r\") lap(\"Concatenating resulting", "Generate distance series using pairs list and different metrics #", "to for name, gp in groupby: # Order the observations", "Generate pairs of observations sequentially to compare pairs = []", "reduce import os import sys import numpy as np import", "the dataframe gp = (gp.sort_values(by=['killmail_id']) .reset_index() .drop('index', axis=1)) # Generate", "tfidf[1:2]).flatten()[0] # Compute cosine distance return cos_dist # Load CSV", "gp.insert(idx, 'del_kdratio', (kills2 - kills1) / (deaths2 - deaths1)) gp.insert(idx+1,", "Uses linear kernel since TF-IDF matrix will be normalized already.", "0 doc1 = reduce(lambda x, y: f'{x} {y}', [x[0] for", "or type(los2) == float: return 0 if len(los1) == 0", "modified group dataframes to for name, gp in groupby: #", "type(los2) == float: return 0 if len(los1) == 0 or", "Distance Metrics - Levenshtein Distance - Damerau-Levenshtein Distance - Jaro", "utf-8 -*- \"\"\"Computes distance between killmails by text similarity. Edit", "change in kd ratio kills1 = gp['k_count'] kills2 = gp['k_count'].shift()", "encoding='utf-8') df = df.drop(columns=['HighSlotISK', 'MidSlotISK', 'LowSlotISK', 'type', 'fill']) df =", "to a matrix of TF-IDF features using TfidfVectorizer (combines vector", "import TfidfVectorizer from sklearn.metrics.pairwise import linear_kernel def get_long_text_cosine_distance(los1, los2): \"\"\"Calculates", "x in los2]) # Create bag of words tfidf =", "1 being identical and 0 being complete different. \"\"\" if", "0-1, with 1 being identical. \"\"\" if type(los1) == float", "start, total elapsed = (time.time() - start) - total total", "DataFrame by character_id and compute distance series for each group", "if len(los1) == 0 or len(los2) == 0: return 0", "(deaths2 - deaths1)) gp.insert(idx+1, 'kd_ratio_diff', gp['kd_ratio']-gp['kd_ratio'].shift()) # Generate pairs of", "0 being complete different. 
\"\"\" if type(los1) == float or", "gp.insert(idx+1, 'kd_ratio_diff', gp['kd_ratio']-gp['kd_ratio'].shift()) # Generate pairs of observations sequentially to", "== float or type(los2) == float: return 0 if len(los1)", "= gp['items'] items2 = gp['items'].shift() for i in range(1, len(gp)):", "dataframes to for name, gp in groupby: # Order the", "df.dropna() # Convert items column to correct data type lap(\"Converting", "get_long_text_cosine_distance(los1, los2): \"\"\"Calculates cosine distance between two killmails' item lists.", "60: if total > 3600: print(f'(+{elapsed/60:.2f}m|t:{total/3600:.2f}h) {msg}') else: print(f'(+{elapsed/60:.2f}m|t:{total/60:.2f}m) {msg}')", "collection of raw documents to a matrix of TF-IDF features", "type lap(\"Converting 'item' column value types...\") df['items'] = df['items'].apply(literal_eval) #", "groupby: # Order the observations and prepare the dataframe gp", "Converts the collection of raw documents to a matrix of", "los1)) # Generate distance series using pairs list and different", "in los2]) # Create bag of words tfidf = TfidfVectorizer().fit_transform([doc1,", "import time start = time.time() total = 0 def lap(msg):", "total = time.time() - start if elapsed > 3600: print(f'(+{elapsed/3600:.2f}h|t:{total/3600:.2f}h)", "cosine distance b/w short text BoW for pair in pairs:", "pairs: cos_dist_lt.append(get_long_text_cosine_distance(pair[0], pair[1])) cos_dist_st.append(get_short_text_cosine_distance(pair[0], pair[1])) idx = len(gp.columns) gp.insert(idx, 'cos_dist_lt',", "distance series for each group lap(\"Computing cosine distances and change", "2. Converts the collection of raw documents to a matrix", "total > 60: print(f'(+{elapsed:.3f}s|t:{total/60:.2f}m) {msg}') else: print(f'(+{elapsed:.3f}s|t:{total:.3f}s) {msg}') lap(\"Importing modules...\")", "{count/num_groups:2.1%}\", end=\"\\r\") lap(\"Concatenating resulting groups and writing to file...\") df_res", "character_id num_groups = len(groupby) # get number of groups count", "# Order the observations and prepare the dataframe gp =", "if elapsed > 3600: print(f'(+{elapsed/3600:.2f}h|t:{total/3600:.2f}h) {msg}') elif elapsed > 60:", "and prepare the dataframe gp = (gp.sort_values(by=['killmail_id']) .reset_index() .drop('index', axis=1))", "= len(gp.columns) gp.insert(idx, 'del_kdratio', (kills2 - kills1) / (deaths2 -", "of raw strings. los2: Second document, a list of raw", "+= 1 print(f\"Progress {count/num_groups:2.1%}\", end=\"\\r\") lap(\"Concatenating resulting groups and writing", "def lap(msg): \"\"\"Records time elapsed.\"\"\" global start, total elapsed =", "kd ratio kills1 = gp['k_count'] kills2 = gp['k_count'].shift() deaths1 =", "# Generate distance series using pairs list and different metrics", "lap(\"Importing modules...\") from ast import literal_eval from functools import reduce", "== 0: return 0 doc1 = reduce(lambda x, y: f'{x}", "of TF-IDF features using TfidfVectorizer (combines vector counting and TF-IDF", "the observations and prepare the dataframe gp = (gp.sort_values(by=['killmail_id']) .reset_index()", "if total > 3600: print(f'(+{elapsed:.3f}s|t:{total/3600:.2f}h) {msg}') elif total > 60:", "Computes cosine similarity between feature vectors. Uses linear kernel since", "ast import literal_eval from functools import reduce import os import", "# -*- coding: utf-8 -*- \"\"\"Computes distance between killmails by", "by grouping character_id's...\") groupby = df.groupby('character_id') # group dataframe by", "and 0 being complete different. 
\"\"\" if type(los1) == float", "# Vectorize the bag of words cos_dist = linear_kernel(tfidf[0:1], tfidf[1:2]).flatten()[0]", "using pairs list and different metrics # start distance series", "character_id's...\") groupby = df.groupby('character_id') # group dataframe by character_id num_groups", "Rating Approach Comparison - Hamming Distance Vector Distance Metrics -", "Distance - Jaro Distance - Jaro-Winkler Distance - Match Rating", "groups groups = [] # list to append modified group", "'kd_ratio_diff', gp['kd_ratio']-gp['kd_ratio'].shift()) # Generate pairs of observations sequentially to compare", "y: f'{x} {y}', [x[0] for x in los2]) # Create", "two killmails' item lists. 1. Converts collection of short text", "change in kd by grouping character_id's...\") groupby = df.groupby('character_id') #", "Compute cosine distance return cos_dist # Load CSV from local", "series with nan due to starting range at 1 cos_dist_lt", "gp.insert(idx, 'cos_dist_st', cos_dist_st) groups.append(gp) # Record progress count += 1", "import linear_kernel def get_long_text_cosine_distance(los1, los2): \"\"\"Calculates cosine distance between two", "reduce(lambda x, y: f'{x} {y}', [x[0] for x in los1])", "x, y: f'{x} {y}', [x[1] for x in los2]) #", "# get number of groups count = 0 # current", "- Levenshtein Distance - Damerau-Levenshtein Distance - Jaro Distance -", "of words doc2 = reduce(lambda x, y: f'{x} {y}', [x[1]", "TF-IDF matrix will be normalized already. Arguments: los1: First document,", "nan pair los1 = items1.iloc[i] los2 = items2.iloc[i] pairs.append((los2, los1))", "'cos_dist_st', cos_dist_st) groups.append(gp) # Record progress count += 1 print(f\"Progress", "len(gp.columns) gp.insert(idx, 'cos_dist_lt', cos_dist_lt) gp.insert(idx, 'cos_dist_st', cos_dist_st) groups.append(gp) # Record", "= reduce(lambda x, y: f'{x} {y}', [x[0] for x in", "distance as a value between 0-1, with 1 being identical", "{y}', [x[0] for x in los2]) # Create bag of", "0-1, with 1 being identical and 0 being complete different.", "matrix will be normalized already. Arguments: los1: First document, a", "linear_kernel def get_long_text_cosine_distance(los1, los2): \"\"\"Calculates cosine distance between two killmails'", "[np.nan] # cosine distance b/w long text BoW cos_dist_st =", "# Start from 1 to avoid adding nan pair los1", "\"\"\" # Start timing import time start = time.time() total", "Distance - Match Rating Approach Comparison - Hamming Distance Vector", "elif total > 60: print(f'(+{elapsed:.3f}s|t:{total/60:.2f}m) {msg}') else: print(f'(+{elapsed:.3f}s|t:{total:.3f}s) {msg}') lap(\"Importing", "= gp['d_count'].shift() idx = len(gp.columns) gp.insert(idx, 'del_kdratio', (kills2 - kills1)", "be normalized already. Arguments: los1: First document, a list of", "pairs = [] items1 = gp['items'] items2 = gp['items'].shift() for", "Start from 1 to avoid adding nan pair los1 =", "and compute distance series for each group lap(\"Computing cosine distances", "CSV from local file lap(\"Loading CSV data from local file...\")", "with 1 being identical. 
\"\"\" if type(los1) == float or", "{y}', [x[0] for x in los1]) # Create bag of", "distance b/w short text BoW for pair in pairs: cos_dist_lt.append(get_long_text_cosine_distance(pair[0],", "series for each group lap(\"Computing cosine distances and change in", "= reduce(lambda x, y: f'{x} {y}', [x[1] for x in", "import sys import numpy as np import pandas as pd", "a value between 0-1, with 1 being identical and 0", "group dataframes to for name, gp in groupby: # Order", "{msg}') elif elapsed > 60: if total > 3600: print(f'(+{elapsed/60:.2f}m|t:{total/3600:.2f}h)", "Distance Written By: <NAME> Updated On: 11/09/2019 \"\"\" # Start", "return cos_dist # Load CSV from local file lap(\"Loading CSV", "Start timing import time start = time.time() total = 0", "gp['k_count'].shift() deaths1 = gp['d_count'] deaths2 = gp['d_count'].shift() idx = len(gp.columns)", "to compare pairs = [] items1 = gp['items'] items2 =", "x, y: f'{x} {y}', [x[1] for x in los1]) #", "number of groups groups = [] # list to append", "1. Converts collection of long text items to raw document", "CSV data from local file...\") df = pd.read_csv(f'data/all_victims_complete_partialKD.csv', encoding='utf-8') df", "{y}', [x[1] for x in los1]) # Create bag of", "cos_dist_lt.append(get_long_text_cosine_distance(pair[0], pair[1])) cos_dist_st.append(get_short_text_cosine_distance(pair[0], pair[1])) idx = len(gp.columns) gp.insert(idx, 'cos_dist_lt', cos_dist_lt)", "{msg}') else: print(f'(+{elapsed:.3f}s|t:{total:.3f}s) {msg}') lap(\"Importing modules...\") from ast import literal_eval", "0 doc1 = reduce(lambda x, y: f'{x} {y}', [x[1] for", "by character_id and compute distance series for each group lap(\"Computing", "of observations sequentially to compare pairs = [] items1 =", "= gp['items'].shift() for i in range(1, len(gp)): # Start from", "pairs list and different metrics # start distance series with", "with nan due to starting range at 1 cos_dist_lt =", "cos_dist_st) groups.append(gp) # Record progress count += 1 print(f\"Progress {count/num_groups:2.1%}\",", "[x[1] for x in los2]) # Create bag of words", "1. Converts collection of short text items to raw document", "at 1 cos_dist_lt = [np.nan] # cosine distance b/w long", "Converts collection of long text items to raw document representation.", "<NAME> Updated On: 11/09/2019 \"\"\" # Start timing import time", "in kills over change in deaths and change in kd", "as a value between 0-1, with 1 being identical and", "document, a list of raw strings. los2: Second document, a", "vectors. Uses linear kernel since TF-IDF matrix will be normalized", "dataframe by character_id num_groups = len(groupby) # get number of", "i in range(1, len(gp)): # Start from 1 to avoid", "bag of words cos_dist = linear_kernel(tfidf[0:1], tfidf[1:2]).flatten()[0] # Compute cosine", "a list of raw strings. Returns: cosine distance as a", "compute distance series for each group lap(\"Computing cosine distances and", "f'{x} {y}', [x[1] for x in los2]) # Create bag", "b/w short text BoW for pair in pairs: cos_dist_lt.append(get_long_text_cosine_distance(pair[0], pair[1]))", "\"\"\"Records time elapsed.\"\"\" global start, total elapsed = (time.time() -", "and different metrics # start distance series with nan due", "of number of groups groups = [] # list to", "- deaths1)) gp.insert(idx+1, 'kd_ratio_diff', gp['kd_ratio']-gp['kd_ratio'].shift()) # Generate pairs of observations", "the bag of words cos_dist = linear_kernel(tfidf[0:1], tfidf[1:2]).flatten()[0] # Compute", "text similarity. 
Edit Distance Metrics - Levenshtein Distance - Damerau-Levenshtein", "by character_id num_groups = len(groupby) # get number of groups", "kills2 = gp['k_count'].shift() deaths1 = gp['d_count'] deaths2 = gp['d_count'].shift() idx", "as a value between 0-1, with 1 being identical. \"\"\"", "len(los1) == 0 or len(los2) == 0: return 0 doc1", "cosine distances and change in kd by grouping character_id's...\") groupby", "of long text items to raw document representation. 2. Converts", "local file...\") df = pd.read_csv(f'data/all_victims_complete_partialKD.csv', encoding='utf-8') df = df.drop(columns=['HighSlotISK', 'MidSlotISK',", "time.time() total = 0 def lap(msg): \"\"\"Records time elapsed.\"\"\" global", "0 if len(los1) == 0 or len(los2) == 0: return", "- Cosine Distance Written By: <NAME> Updated On: 11/09/2019 \"\"\"", "Create bag of words tfidf = TfidfVectorizer().fit_transform([doc1, doc2]) # Vectorize", "items1 = gp['items'] items2 = gp['items'].shift() for i in range(1,", "Match Rating Approach Comparison - Hamming Distance Vector Distance Metrics", "> 3600: print(f'(+{elapsed/3600:.2f}h|t:{total/3600:.2f}h) {msg}') elif elapsed > 60: if total", "= gp['k_count'] kills2 = gp['k_count'].shift() deaths1 = gp['d_count'] deaths2 =", "distance b/w long text BoW cos_dist_st = [np.nan] # cosine", "due to starting range at 1 cos_dist_lt = [np.nan] #", "f'{x} {y}', [x[1] for x in los1]) # Create bag", "gp in groupby: # Order the observations and prepare the", "idx = len(gp.columns) gp.insert(idx, 'cos_dist_lt', cos_dist_lt) gp.insert(idx, 'cos_dist_st', cos_dist_st) groups.append(gp)", "numpy as np import pandas as pd from sklearn.feature_extraction.text import", "or len(los2) == 0: return 0 doc1 = reduce(lambda x,", "b/w long text BoW cos_dist_st = [np.nan] # cosine distance", "for x in los1]) # Create bag of words doc2", "each group lap(\"Computing cosine distances and change in kd by", "normalized already. Arguments: los1: First document, a list of raw", "from local file...\") df = pd.read_csv(f'data/all_victims_complete_partialKD.csv', encoding='utf-8') df = df.drop(columns=['HighSlotISK',", "document representation. 2. Converts the collection of raw documents to", "cos_dist = linear_kernel(tfidf[0:1], tfidf[1:2]).flatten()[0] # Compute cosine distance return cos_dist", "'MidSlotISK', 'LowSlotISK', 'type', 'fill']) df = df.dropna() # Convert items", "killmails' item lists. 1. 
Converts collection of short text items", "# cosine distance b/w long text BoW cos_dist_st = [np.nan]", "cos_dist_lt) gp.insert(idx, 'cos_dist_st', cos_dist_st) groups.append(gp) # Record progress count +=", "kills1 = gp['k_count'] kills2 = gp['k_count'].shift() deaths1 = gp['d_count'] deaths2", "elapsed.\"\"\" global start, total elapsed = (time.time() - start) -", "gp['d_count'].shift() idx = len(gp.columns) gp.insert(idx, 'del_kdratio', (kills2 - kills1) /", "local file lap(\"Loading CSV data from local file...\") df =", "np import pandas as pd from sklearn.feature_extraction.text import TfidfVectorizer from", "file lap(\"Loading CSV data from local file...\") df = pd.read_csv(f'data/all_victims_complete_partialKD.csv',", "df = df.drop(columns=['HighSlotISK', 'MidSlotISK', 'LowSlotISK', 'type', 'fill']) df = df.dropna()", "groups and writing to file...\") df_res = pd.concat(groups) df_res.to_csv(f'data/useable_victims_distancesAndKD.csv') lap(\"Exit\")", "0 def lap(msg): \"\"\"Records time elapsed.\"\"\" global start, total elapsed", "/ (deaths2 - deaths1)) gp.insert(idx+1, 'kd_ratio_diff', gp['kd_ratio']-gp['kd_ratio'].shift()) # Generate pairs", "list of raw strings. Returns: cosine distance as a value", "= TfidfVectorizer().fit_transform([doc1, doc2]) # Vectorize the bag of words cos_dist", "0 or len(los2) == 0: return 0 doc1 = reduce(lambda", "if type(los1) == float or type(los2) == float: return 0", "count = 0 # current group number out of number", "adding nan pair los1 = items1.iloc[i] los2 = items2.iloc[i] pairs.append((los2,", "the collection of raw documents to a matrix of TF-IDF", "cosine distance between two killmails' item lists. 1. Converts collection", "metrics # start distance series with nan due to starting", "cosine distance return cos_dist # Load CSV from local file", "BoW for pair in pairs: cos_dist_lt.append(get_long_text_cosine_distance(pair[0], pair[1])) cos_dist_st.append(get_short_text_cosine_distance(pair[0], pair[1])) idx", "cosine similarity between feature vectors. Uses linear kernel since TF-IDF", "raw strings. los2: Second document, a list of raw strings.", "df['items'] = df['items'].apply(literal_eval) # Group DataFrame by character_id and compute", "\"\"\"Computes distance between killmails by text similarity. Edit Distance Metrics", "calculator). 3. Computes cosine similarity between feature vectors. Uses linear", "between killmails by text similarity. Edit Distance Metrics - Levenshtein", "being identical and 0 being complete different. \"\"\" if type(los1)", "\"\"\" if type(los1) == float or type(los2) == float: return", "type(los1) == float or type(los2) == float: return 0 if", "= (gp.sort_values(by=['killmail_id']) .reset_index() .drop('index', axis=1)) # Generate change in kills", "Edit Distance Metrics - Levenshtein Distance - Damerau-Levenshtein Distance -", "total > 3600: print(f'(+{elapsed/60:.2f}m|t:{total/3600:.2f}h) {msg}') else: print(f'(+{elapsed/60:.2f}m|t:{total/60:.2f}m) {msg}') else: if", "items2 = gp['items'].shift() for i in range(1, len(gp)): # Start", "# group dataframe by character_id num_groups = len(groupby) # get", "raw document representation. 2. 
Converts the collection of raw documents", "for x in los2]) # Create bag of words tfidf", "nan due to starting range at 1 cos_dist_lt = [np.nan]", "cos_dist def get_short_text_cosine_distance(los1, los2): \"\"\"Calculates cosine distance between two killmails'", "pandas as pd from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics.pairwise import", "get number of groups count = 0 # current group", "reduce(lambda x, y: f'{x} {y}', [x[1] for x in los1])", "TfidfVectorizer (combines vector counting and TF-IDF calculator). 3. Computes cosine", "return 0 doc1 = reduce(lambda x, y: f'{x} {y}', [x[1]", "Returns: cosine distance as a value between 0-1, with 1", "[] # list to append modified group dataframes to for", "item lists. 1. Converts collection of short text items to", "value types...\") df['items'] = df['items'].apply(literal_eval) # Group DataFrame by character_id", "> 60: print(f'(+{elapsed:.3f}s|t:{total/60:.2f}m) {msg}') else: print(f'(+{elapsed:.3f}s|t:{total:.3f}s) {msg}') lap(\"Importing modules...\") from", "Load CSV from local file lap(\"Loading CSV data from local", "lap(\"Converting 'item' column value types...\") df['items'] = df['items'].apply(literal_eval) # Group", "start if elapsed > 3600: print(f'(+{elapsed/3600:.2f}h|t:{total/3600:.2f}h) {msg}') elif elapsed >", "by text similarity. Edit Distance Metrics - Levenshtein Distance -", "document, a list of raw strings. Returns: cosine distance as", "file...\") df = pd.read_csv(f'data/all_victims_complete_partialKD.csv', encoding='utf-8') df = df.drop(columns=['HighSlotISK', 'MidSlotISK', 'LowSlotISK',", "kd by grouping character_id's...\") groupby = df.groupby('character_id') # group dataframe", "in kd by grouping character_id's...\") groupby = df.groupby('character_id') # group", "timing import time start = time.time() total = 0 def", "for pair in pairs: cos_dist_lt.append(get_long_text_cosine_distance(pair[0], pair[1])) cos_dist_st.append(get_short_text_cosine_distance(pair[0], pair[1])) idx =", "words tfidf = TfidfVectorizer().fit_transform([doc1, doc2]) # Vectorize the bag of", "sys import numpy as np import pandas as pd from", "los1 = items1.iloc[i] los2 = items2.iloc[i] pairs.append((los2, los1)) # Generate", "3600: print(f'(+{elapsed/3600:.2f}h|t:{total/3600:.2f}h) {msg}') elif elapsed > 60: if total >", "series using pairs list and different metrics # start distance", "time.time() - start if elapsed > 3600: print(f'(+{elapsed/3600:.2f}h|t:{total/3600:.2f}h) {msg}') elif", "pairs of observations sequentially to compare pairs = [] items1", "- start) - total total = time.time() - start if", "from functools import reduce import os import sys import numpy", "los2 = items2.iloc[i] pairs.append((los2, los1)) # Generate distance series using", "elif elapsed > 60: if total > 3600: print(f'(+{elapsed/60:.2f}m|t:{total/3600:.2f}h) {msg}')", "{msg}') else: print(f'(+{elapsed/60:.2f}m|t:{total/60:.2f}m) {msg}') else: if total > 3600: print(f'(+{elapsed:.3f}s|t:{total/3600:.2f}h)", "vector counting and TF-IDF calculator). 3. 
Computes cosine similarity between", "= time.time() total = 0 def lap(msg): \"\"\"Records time elapsed.\"\"\"", "= df.dropna() # Convert items column to correct data type", "doc2 = reduce(lambda x, y: f'{x} {y}', [x[0] for x", "Convert items column to correct data type lap(\"Converting 'item' column", "number of groups count = 0 # current group number", "distance series with nan due to starting range at 1", "for name, gp in groupby: # Order the observations and", "in deaths and change in kd ratio kills1 = gp['k_count']", "num_groups = len(groupby) # get number of groups count =", "total total = time.time() - start if elapsed > 3600:", "identical. \"\"\" if type(los1) == float or type(los2) == float:", "'type', 'fill']) df = df.dropna() # Convert items column to", "lists. 1. Converts collection of short text items to raw", "to append modified group dataframes to for name, gp in", "Jaro Distance - Jaro-Winkler Distance - Match Rating Approach Comparison", "= items2.iloc[i] pairs.append((los2, los1)) # Generate distance series using pairs", "# Create bag of words tfidf = TfidfVectorizer().fit_transform([doc1, doc2]) #", "count += 1 print(f\"Progress {count/num_groups:2.1%}\", end=\"\\r\") lap(\"Concatenating resulting groups and", "# Record progress count += 1 print(f\"Progress {count/num_groups:2.1%}\", end=\"\\r\") lap(\"Concatenating", "'fill']) df = df.dropna() # Convert items column to correct", "= 0 # current group number out of number of", "import reduce import os import sys import numpy as np", "start) - total total = time.time() - start if elapsed", "Distance - Damerau-Levenshtein Distance - Jaro Distance - Jaro-Winkler Distance", "df.drop(columns=['HighSlotISK', 'MidSlotISK', 'LowSlotISK', 'type', 'fill']) df = df.dropna() # Convert", "Metrics - Levenshtein Distance - Damerau-Levenshtein Distance - Jaro Distance", "Group DataFrame by character_id and compute distance series for each", "60: print(f'(+{elapsed:.3f}s|t:{total/60:.2f}m) {msg}') else: print(f'(+{elapsed:.3f}s|t:{total:.3f}s) {msg}') lap(\"Importing modules...\") from ast", "in pairs: cos_dist_lt.append(get_long_text_cosine_distance(pair[0], pair[1])) cos_dist_st.append(get_short_text_cosine_distance(pair[0], pair[1])) idx = len(gp.columns) gp.insert(idx,", "import os import sys import numpy as np import pandas", "representation. 2. Converts the collection of raw documents to a", "f'{x} {y}', [x[0] for x in los2]) # Create bag", "of raw documents to a matrix of TF-IDF features using", "of groups count = 0 # current group number out", "prepare the dataframe gp = (gp.sort_values(by=['killmail_id']) .reset_index() .drop('index', axis=1)) #", "Generate change in kills over change in deaths and change", "distance between two killmails' item lists. 1. Converts collection of", "sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics.pairwise import linear_kernel def get_long_text_cosine_distance(los1, los2):", "los2: Second document, a list of raw strings. 
Returns: cosine", "linear_kernel(tfidf[0:1], tfidf[1:2]).flatten()[0] # Compute cosine distance return cos_dist # Load", "# list to append modified group dataframes to for name,", "lap(\"Concatenating resulting groups and writing to file...\") df_res = pd.concat(groups)", "gp.insert(idx, 'cos_dist_lt', cos_dist_lt) gp.insert(idx, 'cos_dist_st', cos_dist_st) groups.append(gp) # Record progress", "print(f'(+{elapsed:.3f}s|t:{total/60:.2f}m) {msg}') else: print(f'(+{elapsed:.3f}s|t:{total:.3f}s) {msg}') lap(\"Importing modules...\") from ast import", "correct data type lap(\"Converting 'item' column value types...\") df['items'] =", "len(groupby) # get number of groups count = 0 #", "= gp['d_count'] deaths2 = gp['d_count'].shift() idx = len(gp.columns) gp.insert(idx, 'del_kdratio',", "raw strings. Returns: cosine distance as a value between 0-1,", "killmails' item lists. 1. Converts collection of long text items", "y: f'{x} {y}', [x[0] for x in los1]) # Create", "being complete different. \"\"\" if type(los1) == float or type(los2)", "of groups groups = [] # list to append modified", "= df['items'].apply(literal_eval) # Group DataFrame by character_id and compute distance", "else: print(f'(+{elapsed/60:.2f}m|t:{total/60:.2f}m) {msg}') else: if total > 3600: print(f'(+{elapsed:.3f}s|t:{total/3600:.2f}h) {msg}')", "kills over change in deaths and change in kd ratio", "words cos_dist = linear_kernel(tfidf[0:1], tfidf[1:2]).flatten()[0] # Compute cosine distance return", "- Jaro Distance - Jaro-Winkler Distance - Match Rating Approach", "-*- coding: utf-8 -*- \"\"\"Computes distance between killmails by text", "3. Computes cosine similarity between feature vectors. Uses linear kernel", "sequentially to compare pairs = [] items1 = gp['items'] items2", "to raw document representation. 2. Converts the collection of raw", "with 1 being identical and 0 being complete different. \"\"\"", "else: print(f'(+{elapsed:.3f}s|t:{total:.3f}s) {msg}') lap(\"Importing modules...\") from ast import literal_eval from", "Second document, a list of raw strings. Returns: cosine distance", "cosine distance b/w long text BoW cos_dist_st = [np.nan] #", "tfidf = TfidfVectorizer().fit_transform([doc1, doc2]) # Vectorize the bag of words", "1 cos_dist_lt = [np.nan] # cosine distance b/w long text", "pair los1 = items1.iloc[i] los2 = items2.iloc[i] pairs.append((los2, los1)) #", "Metrics - Jaccard Similarity - Cosine Distance Written By: <NAME>", "out of number of groups groups = [] # list", "of words doc2 = reduce(lambda x, y: f'{x} {y}', [x[0]", "avoid adding nan pair los1 = items1.iloc[i] los2 = items2.iloc[i]", "- Damerau-Levenshtein Distance - Jaro Distance - Jaro-Winkler Distance -", "literal_eval from functools import reduce import os import sys import", "{msg}') lap(\"Importing modules...\") from ast import literal_eval from functools import", "# Load CSV from local file lap(\"Loading CSV data from", "cosine distance return cos_dist def get_short_text_cosine_distance(los1, los2): \"\"\"Calculates cosine distance", "similarity. 
Edit Distance Metrics - Levenshtein Distance - Damerau-Levenshtein Distance", "= len(groupby) # get number of groups count = 0", "Order the observations and prepare the dataframe gp = (gp.sort_values(by=['killmail_id'])", "and change in kd ratio kills1 = gp['k_count'] kills2 =", "- Match Rating Approach Comparison - Hamming Distance Vector Distance", "pair in pairs: cos_dist_lt.append(get_long_text_cosine_distance(pair[0], pair[1])) cos_dist_st.append(get_short_text_cosine_distance(pair[0], pair[1])) idx = len(gp.columns)", "TF-IDF calculator). 3. Computes cosine similarity between feature vectors. Uses", "group dataframe by character_id num_groups = len(groupby) # get number", "features using TfidfVectorizer (combines vector counting and TF-IDF calculator). 3.", "los1: First document, a list of raw strings. los2: Second", "'item' column value types...\") df['items'] = df['items'].apply(literal_eval) # Group DataFrame", "(kills2 - kills1) / (deaths2 - deaths1)) gp.insert(idx+1, 'kd_ratio_diff', gp['kd_ratio']-gp['kd_ratio'].shift())", "Converts collection of short text items to raw document representation.", "compare pairs = [] items1 = gp['items'] items2 = gp['items'].shift()", "feature vectors. Uses linear kernel since TF-IDF matrix will be", "items1.iloc[i] los2 = items2.iloc[i] pairs.append((los2, los1)) # Generate distance series", "gp['k_count'] kills2 = gp['k_count'].shift() deaths1 = gp['d_count'] deaths2 = gp['d_count'].shift()", "gp['items'].shift() for i in range(1, len(gp)): # Start from 1", "long text BoW cos_dist_st = [np.nan] # cosine distance b/w", "items2.iloc[i] pairs.append((los2, los1)) # Generate distance series using pairs list", "\"\"\"Calculates cosine distance between two killmails' item lists. 1. Converts", "column to correct data type lap(\"Converting 'item' column value types...\")", "functools import reduce import os import sys import numpy as", "By: <NAME> Updated On: 11/09/2019 \"\"\" # Start timing import", "distance series using pairs list and different metrics # start", "1 print(f\"Progress {count/num_groups:2.1%}\", end=\"\\r\") lap(\"Concatenating resulting groups and writing to", "cosine distance as a value between 0-1, with 1 being", "(gp.sort_values(by=['killmail_id']) .reset_index() .drop('index', axis=1)) # Generate change in kills over", "between 0-1, with 1 being identical and 0 being complete", "On: 11/09/2019 \"\"\" # Start timing import time start =", "total = 0 def lap(msg): \"\"\"Records time elapsed.\"\"\" global start,", "pd from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics.pairwise import linear_kernel def", "list to append modified group dataframes to for name, gp", "Compute cosine distance return cos_dist def get_short_text_cosine_distance(los1, los2): \"\"\"Calculates cosine", "current group number out of number of groups groups =", "since TF-IDF matrix will be normalized already. Arguments: los1: First", "bag of words doc2 = reduce(lambda x, y: f'{x} {y}',", "of short text items to raw document representation. 2. Converts", "BoW cos_dist_st = [np.nan] # cosine distance b/w short text", "text BoW cos_dist_st = [np.nan] # cosine distance b/w short", "pair[1])) cos_dist_st.append(get_short_text_cosine_distance(pair[0], pair[1])) idx = len(gp.columns) gp.insert(idx, 'cos_dist_lt', cos_dist_lt) gp.insert(idx,", "between feature vectors. 
Uses linear kernel since TF-IDF matrix will", "group lap(\"Computing cosine distances and change in kd by grouping", "x, y: f'{x} {y}', [x[0] for x in los2]) #", "[np.nan] # cosine distance b/w short text BoW for pair", "float or type(los2) == float: return 0 if len(los1) ==", "elapsed > 3600: print(f'(+{elapsed/3600:.2f}h|t:{total/3600:.2f}h) {msg}') elif elapsed > 60: if", "== float: return 0 if len(los1) == 0 or len(los2)", "identical and 0 being complete different. \"\"\" if type(los1) ==", "data from local file...\") df = pd.read_csv(f'data/all_victims_complete_partialKD.csv', encoding='utf-8') df =", "# Generate change in kills over change in deaths and", "idx = len(gp.columns) gp.insert(idx, 'del_kdratio', (kills2 - kills1) / (deaths2", "Jaro-Winkler Distance - Match Rating Approach Comparison - Hamming Distance", "= items1.iloc[i] los2 = items2.iloc[i] pairs.append((los2, los1)) # Generate distance", "import numpy as np import pandas as pd from sklearn.feature_extraction.text", "ratio kills1 = gp['k_count'] kills2 = gp['k_count'].shift() deaths1 = gp['d_count']", "gp['d_count'] deaths2 = gp['d_count'].shift() idx = len(gp.columns) gp.insert(idx, 'del_kdratio', (kills2", "distance as a value between 0-1, with 1 being identical.", "time elapsed.\"\"\" global start, total elapsed = (time.time() - start)", "= len(gp.columns) gp.insert(idx, 'cos_dist_lt', cos_dist_lt) gp.insert(idx, 'cos_dist_st', cos_dist_st) groups.append(gp) #", "similarity between feature vectors. Uses linear kernel since TF-IDF matrix", "tfidf[1:2]).flatten()[0] # Compute cosine distance return cos_dist def get_short_text_cosine_distance(los1, los2):", "'LowSlotISK', 'type', 'fill']) df = df.dropna() # Convert items column", "def get_long_text_cosine_distance(los1, los2): \"\"\"Calculates cosine distance between two killmails' item", "different metrics # start distance series with nan due to", "TF-IDF features using TfidfVectorizer (combines vector counting and TF-IDF calculator).", "<filename>Trajectory_Mining/Bag_of_Words/Comp_Corr_KD_CosDist/comp_dist_partialKD.py # -*- coding: utf-8 -*- \"\"\"Computes distance between killmails", "short text BoW for pair in pairs: cos_dist_lt.append(get_long_text_cosine_distance(pair[0], pair[1])) cos_dist_st.append(get_short_text_cosine_distance(pair[0],", "strings. Returns: cosine distance as a value between 0-1, with", "= df.groupby('character_id') # group dataframe by character_id num_groups = len(groupby)", "# current group number out of number of groups groups", "# Create bag of words doc2 = reduce(lambda x, y:", "Jaccard Similarity - Cosine Distance Written By: <NAME> Updated On:", "a list of raw strings. los2: Second document, a list", "lap(\"Computing cosine distances and change in kd by grouping character_id's...\")", "of words cos_dist = linear_kernel(tfidf[0:1], tfidf[1:2]).flatten()[0] # Compute cosine distance", "groups count = 0 # current group number out of", "> 60: if total > 3600: print(f'(+{elapsed/60:.2f}m|t:{total/3600:.2f}h) {msg}') else: print(f'(+{elapsed/60:.2f}m|t:{total/60:.2f}m)", "strings. los2: Second document, a list of raw strings. Returns:", "deaths and change in kd ratio kills1 = gp['k_count'] kills2", "[x[0] for x in los1]) # Create bag of words", "cos_dist_lt = [np.nan] # cosine distance b/w long text BoW", "Arguments: los1: First document, a list of raw strings. 
los2:", "range(1, len(gp)): # Start from 1 to avoid adding nan", "observations and prepare the dataframe gp = (gp.sort_values(by=['killmail_id']) .reset_index() .drop('index',", "print(f'(+{elapsed/60:.2f}m|t:{total/3600:.2f}h) {msg}') else: print(f'(+{elapsed/60:.2f}m|t:{total/60:.2f}m) {msg}') else: if total > 3600:", "in range(1, len(gp)): # Start from 1 to avoid adding", "- Hamming Distance Vector Distance Metrics - Jaccard Similarity -", "Vector Distance Metrics - Jaccard Similarity - Cosine Distance Written", "TfidfVectorizer from sklearn.metrics.pairwise import linear_kernel def get_long_text_cosine_distance(los1, los2): \"\"\"Calculates cosine", "types...\") df['items'] = df['items'].apply(literal_eval) # Group DataFrame by character_id and", "group number out of number of groups groups = []", "0 # current group number out of number of groups", "pair[1])) idx = len(gp.columns) gp.insert(idx, 'cos_dist_lt', cos_dist_lt) gp.insert(idx, 'cos_dist_st', cos_dist_st)", "Cosine Distance Written By: <NAME> Updated On: 11/09/2019 \"\"\" #", "= time.time() - start if elapsed > 3600: print(f'(+{elapsed/3600:.2f}h|t:{total/3600:.2f}h) {msg}')", "- start if elapsed > 3600: print(f'(+{elapsed/3600:.2f}h|t:{total/3600:.2f}h) {msg}') elif elapsed", "3600: print(f'(+{elapsed:.3f}s|t:{total/3600:.2f}h) {msg}') elif total > 60: print(f'(+{elapsed:.3f}s|t:{total/60:.2f}m) {msg}') else:", "list of raw strings. los2: Second document, a list of", "get_short_text_cosine_distance(los1, los2): \"\"\"Calculates cosine distance between two killmails' item lists.", "distance return cos_dist # Load CSV from local file lap(\"Loading", "distances and change in kd by grouping character_id's...\") groupby =", "global start, total elapsed = (time.time() - start) - total", "total > 3600: print(f'(+{elapsed:.3f}s|t:{total/3600:.2f}h) {msg}') elif total > 60: print(f'(+{elapsed:.3f}s|t:{total/60:.2f}m)", "long text items to raw document representation. 2. Converts the", "[] items1 = gp['items'] items2 = gp['items'].shift() for i in", "total elapsed = (time.time() - start) - total total =", "elapsed = (time.time() - start) - total total = time.time()", "from 1 to avoid adding nan pair los1 = items1.iloc[i]", "# Group DataFrame by character_id and compute distance series for", "# Start timing import time start = time.time() total =", "from sklearn.metrics.pairwise import linear_kernel def get_long_text_cosine_distance(los1, los2): \"\"\"Calculates cosine distance", "'del_kdratio', (kills2 - kills1) / (deaths2 - deaths1)) gp.insert(idx+1, 'kd_ratio_diff',", "list and different metrics # start distance series with nan", "collection of short text items to raw document representation. 
2.", "gp['items'] items2 = gp['items'].shift() for i in range(1, len(gp)): #", "gp = (gp.sort_values(by=['killmail_id']) .reset_index() .drop('index', axis=1)) # Generate change in", ".reset_index() .drop('index', axis=1)) # Generate change in kills over change", "doc1 = reduce(lambda x, y: f'{x} {y}', [x[1] for x", "for i in range(1, len(gp)): # Start from 1 to", "to correct data type lap(\"Converting 'item' column value types...\") df['items']", "kills1) / (deaths2 - deaths1)) gp.insert(idx+1, 'kd_ratio_diff', gp['kd_ratio']-gp['kd_ratio'].shift()) # Generate", "# Generate pairs of observations sequentially to compare pairs =", "cos_dist_st = [np.nan] # cosine distance b/w short text BoW", "Distance - Jaro-Winkler Distance - Match Rating Approach Comparison -", "doc2 = reduce(lambda x, y: f'{x} {y}', [x[1] for x", "los1]) # Create bag of words doc2 = reduce(lambda x,", "starting range at 1 cos_dist_lt = [np.nan] # cosine distance", "Written By: <NAME> Updated On: 11/09/2019 \"\"\" # Start timing", "3600: print(f'(+{elapsed/60:.2f}m|t:{total/3600:.2f}h) {msg}') else: print(f'(+{elapsed/60:.2f}m|t:{total/60:.2f}m) {msg}') else: if total >", "modules...\") from ast import literal_eval from functools import reduce import", "def get_short_text_cosine_distance(los1, los2): \"\"\"Calculates cosine distance between two killmails' item", "in groupby: # Order the observations and prepare the dataframe", "value between 0-1, with 1 being identical. \"\"\" if type(los1)", "# Compute cosine distance return cos_dist def get_short_text_cosine_distance(los1, los2): \"\"\"Calculates", "= [np.nan] # cosine distance b/w long text BoW cos_dist_st", "text BoW for pair in pairs: cos_dist_lt.append(get_long_text_cosine_distance(pair[0], pair[1])) cos_dist_st.append(get_short_text_cosine_distance(pair[0], pair[1]))", "distance between killmails by text similarity. Edit Distance Metrics -", "[x[0] for x in los2]) # Create bag of words", "= linear_kernel(tfidf[0:1], tfidf[1:2]).flatten()[0] # Compute cosine distance return cos_dist #", "as np import pandas as pd from sklearn.feature_extraction.text import TfidfVectorizer", "x in los1]) # Create bag of words doc2 =", "deaths2 = gp['d_count'].shift() idx = len(gp.columns) gp.insert(idx, 'del_kdratio', (kills2 -", "raw documents to a matrix of TF-IDF features using TfidfVectorizer", "coding: utf-8 -*- \"\"\"Computes distance between killmails by text similarity.", "> 3600: print(f'(+{elapsed/60:.2f}m|t:{total/3600:.2f}h) {msg}') else: print(f'(+{elapsed/60:.2f}m|t:{total/60:.2f}m) {msg}') else: if total", ".drop('index', axis=1)) # Generate change in kills over change in", "Distance Vector Distance Metrics - Jaccard Similarity - Cosine Distance", "print(f'(+{elapsed:.3f}s|t:{total:.3f}s) {msg}') lap(\"Importing modules...\") from ast import literal_eval from functools", "two killmails' item lists. 1. 
Converts collection of long text", "1 to avoid adding nan pair los1 = items1.iloc[i] los2", "Vectorize the bag of words cos_dist = linear_kernel(tfidf[0:1], tfidf[1:2]).flatten()[0] #", "pairs.append((los2, los1)) # Generate distance series using pairs list and", "11/09/2019 \"\"\" # Start timing import time start = time.time()", "Damerau-Levenshtein Distance - Jaro Distance - Jaro-Winkler Distance - Match", "Comparison - Hamming Distance Vector Distance Metrics - Jaccard Similarity", "f'{x} {y}', [x[0] for x in los1]) # Create bag", "to starting range at 1 cos_dist_lt = [np.nan] # cosine", "axis=1)) # Generate change in kills over change in deaths", "change in kills over change in deaths and change in", "print(f\"Progress {count/num_groups:2.1%}\", end=\"\\r\") lap(\"Concatenating resulting groups and writing to file...\")", "character_id and compute distance series for each group lap(\"Computing cosine", "= 0 def lap(msg): \"\"\"Records time elapsed.\"\"\" global start, total", "1 being identical. \"\"\" if type(los1) == float or type(los2)", "lap(\"Loading CSV data from local file...\") df = pd.read_csv(f'data/all_victims_complete_partialKD.csv', encoding='utf-8')", "of words tfidf = TfidfVectorizer().fit_transform([doc1, doc2]) # Vectorize the bag", "change in deaths and change in kd ratio kills1 =", "being identical. \"\"\" if type(los1) == float or type(los2) ==", "Similarity - Cosine Distance Written By: <NAME> Updated On: 11/09/2019", "-*- \"\"\"Computes distance between killmails by text similarity. Edit Distance", "and change in kd by grouping character_id's...\") groupby = df.groupby('character_id')", "len(los2) == 0: return 0 doc1 = reduce(lambda x, y:", "grouping character_id's...\") groupby = df.groupby('character_id') # group dataframe by character_id", "items column to correct data type lap(\"Converting 'item' column value", "(time.time() - start) - total total = time.time() - start", "First document, a list of raw strings. los2: Second document,", "return cos_dist def get_short_text_cosine_distance(los1, los2): \"\"\"Calculates cosine distance between two", "- Jaccard Similarity - Cosine Distance Written By: <NAME> Updated", "Hamming Distance Vector Distance Metrics - Jaccard Similarity - Cosine", "text items to raw document representation. 2. Converts the collection", "len(gp.columns) gp.insert(idx, 'del_kdratio', (kills2 - kills1) / (deaths2 - deaths1))", "observations sequentially to compare pairs = [] items1 = gp['items']", "len(gp)): # Start from 1 to avoid adding nan pair", "os import sys import numpy as np import pandas as", "over change in deaths and change in kd ratio kills1", "== 0 or len(los2) == 0: return 0 doc1 =", "df = df.dropna() # Convert items column to correct data", "if total > 3600: print(f'(+{elapsed/60:.2f}m|t:{total/3600:.2f}h) {msg}') else: print(f'(+{elapsed/60:.2f}m|t:{total/60:.2f}m) {msg}') else:", "deaths1)) gp.insert(idx+1, 'kd_ratio_diff', gp['kd_ratio']-gp['kd_ratio'].shift()) # Generate pairs of observations sequentially", "return 0 if len(los1) == 0 or len(los2) == 0:", "from ast import literal_eval from functools import reduce import os", "reduce(lambda x, y: f'{x} {y}', [x[0] for x in los2])", "between two killmails' item lists. 1. 
Converts collection of short", "else: if total > 3600: print(f'(+{elapsed:.3f}s|t:{total/3600:.2f}h) {msg}') elif total >", "matrix of TF-IDF features using TfidfVectorizer (combines vector counting and", "start = time.time() total = 0 def lap(msg): \"\"\"Records time", "bag of words tfidf = TfidfVectorizer().fit_transform([doc1, doc2]) # Vectorize the", "pd.read_csv(f'data/all_victims_complete_partialKD.csv', encoding='utf-8') df = df.drop(columns=['HighSlotISK', 'MidSlotISK', 'LowSlotISK', 'type', 'fill']) df", "los2): \"\"\"Calculates cosine distance between two killmails' item lists. 1.", "already. Arguments: los1: First document, a list of raw strings.", "Updated On: 11/09/2019 \"\"\" # Start timing import time start", "item lists. 1. Converts collection of long text items to", "- kills1) / (deaths2 - deaths1)) gp.insert(idx+1, 'kd_ratio_diff', gp['kd_ratio']-gp['kd_ratio'].shift()) #", "doc1 = reduce(lambda x, y: f'{x} {y}', [x[0] for x", "lap(msg): \"\"\"Records time elapsed.\"\"\" global start, total elapsed = (time.time()", "= [np.nan] # cosine distance b/w short text BoW for", "{msg}') elif total > 60: print(f'(+{elapsed:.3f}s|t:{total/60:.2f}m) {msg}') else: print(f'(+{elapsed:.3f}s|t:{total:.3f}s) {msg}')", "a matrix of TF-IDF features using TfidfVectorizer (combines vector counting", "- Jaro-Winkler Distance - Match Rating Approach Comparison - Hamming", "documents to a matrix of TF-IDF features using TfidfVectorizer (combines", "words doc2 = reduce(lambda x, y: f'{x} {y}', [x[1] for", "append modified group dataframes to for name, gp in groupby:", "'cos_dist_lt', cos_dist_lt) gp.insert(idx, 'cos_dist_st', cos_dist_st) groups.append(gp) # Record progress count", "import pandas as pd from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics.pairwise", "x, y: f'{x} {y}', [x[0] for x in los1]) #", "resulting groups and writing to file...\") df_res = pd.concat(groups) df_res.to_csv(f'data/useable_victims_distancesAndKD.csv')", "and TF-IDF calculator). 3. Computes cosine similarity between feature vectors.", "= df.drop(columns=['HighSlotISK', 'MidSlotISK', 'LowSlotISK', 'type', 'fill']) df = df.dropna() #", "in kd ratio kills1 = gp['k_count'] kills2 = gp['k_count'].shift() deaths1", "return 0 doc1 = reduce(lambda x, y: f'{x} {y}', [x[0]", "data type lap(\"Converting 'item' column value types...\") df['items'] = df['items'].apply(literal_eval)", "start distance series with nan due to starting range at", "# Convert items column to correct data type lap(\"Converting 'item'", "as pd from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics.pairwise import linear_kernel", "{msg}') else: if total > 3600: print(f'(+{elapsed:.3f}s|t:{total/3600:.2f}h) {msg}') elif total", "import literal_eval from functools import reduce import os import sys", "different. 
\"\"\" if type(los1) == float or type(los2) == float:", "Create bag of words doc2 = reduce(lambda x, y: f'{x}", "TfidfVectorizer().fit_transform([doc1, doc2]) # Vectorize the bag of words cos_dist =", "linear_kernel(tfidf[0:1], tfidf[1:2]).flatten()[0] # Compute cosine distance return cos_dist def get_short_text_cosine_distance(los1,", "Levenshtein Distance - Damerau-Levenshtein Distance - Jaro Distance - Jaro-Winkler", "= pd.read_csv(f'data/all_victims_complete_partialKD.csv', encoding='utf-8') df = df.drop(columns=['HighSlotISK', 'MidSlotISK', 'LowSlotISK', 'type', 'fill'])", "# start distance series with nan due to starting range", "groupby = df.groupby('character_id') # group dataframe by character_id num_groups =", "= gp['k_count'].shift() deaths1 = gp['d_count'] deaths2 = gp['d_count'].shift() idx =", "between two killmails' item lists. 1. Converts collection of long", "counting and TF-IDF calculator). 3. Computes cosine similarity between feature", "# cosine distance b/w short text BoW for pair in", "a value between 0-1, with 1 being identical. \"\"\" if", "kernel since TF-IDF matrix will be normalized already. Arguments: los1:", "progress count += 1 print(f\"Progress {count/num_groups:2.1%}\", end=\"\\r\") lap(\"Concatenating resulting groups", "df.groupby('character_id') # group dataframe by character_id num_groups = len(groupby) #", "number out of number of groups groups = [] #", "will be normalized already. Arguments: los1: First document, a list", "distance return cos_dist def get_short_text_cosine_distance(los1, los2): \"\"\"Calculates cosine distance between", "groups = [] # list to append modified group dataframes", "print(f'(+{elapsed/60:.2f}m|t:{total/60:.2f}m) {msg}') else: if total > 3600: print(f'(+{elapsed:.3f}s|t:{total/3600:.2f}h) {msg}') elif", "gp['kd_ratio']-gp['kd_ratio'].shift()) # Generate pairs of observations sequentially to compare pairs", "collection of long text items to raw document representation. 2.", "words doc2 = reduce(lambda x, y: f'{x} {y}', [x[0] for", "sklearn.metrics.pairwise import linear_kernel def get_long_text_cosine_distance(los1, los2): \"\"\"Calculates cosine distance between", "cos_dist # Load CSV from local file lap(\"Loading CSV data", "deaths1 = gp['d_count'] deaths2 = gp['d_count'].shift() idx = len(gp.columns) gp.insert(idx,", "print(f'(+{elapsed:.3f}s|t:{total/3600:.2f}h) {msg}') elif total > 60: print(f'(+{elapsed:.3f}s|t:{total/60:.2f}m) {msg}') else: print(f'(+{elapsed:.3f}s|t:{total:.3f}s)", "linear kernel since TF-IDF matrix will be normalized already. Arguments:", "dataframe gp = (gp.sort_values(by=['killmail_id']) .reset_index() .drop('index', axis=1)) # Generate change", "float: return 0 if len(los1) == 0 or len(los2) ==", "> 3600: print(f'(+{elapsed:.3f}s|t:{total/3600:.2f}h) {msg}') elif total > 60: print(f'(+{elapsed:.3f}s|t:{total/60:.2f}m) {msg}')", "cos_dist_st.append(get_short_text_cosine_distance(pair[0], pair[1])) idx = len(gp.columns) gp.insert(idx, 'cos_dist_lt', cos_dist_lt) gp.insert(idx, 'cos_dist_st'," ]
[ "Move(uci=move.uci()), Board(array=array).legal_moves)) ) array[0, :] = Board.piece_symbol_to_int(\"p\") all_possible_moves.update( set(map(lambda move:", "[\"Q\", \"N\"] ): array[i][j] = Board.piece_symbol_to_int(piece) all_possible_moves.update( set(map(lambda move: Move(uci=move.uci()),", "array[1, :] = Board.piece_symbol_to_int(\"P\") all_possible_moves.update( set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves)) )", "all_possible_moves = set() array = np.zeros((ConfigChess.board_size, ConfigChess.board_size)).astype(\"int8\") for i, j,", ":] = Board.piece_symbol_to_int(\"P\") all_possible_moves.update( set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves)) ) array[0,", "array = np.zeros((ConfigChess.board_size, ConfigChess.board_size)).astype(\"int8\") for i, j, piece in product(", "all_possible_moves.update( set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves)) ) array[i][j] = 0 #", "List[Move]: all_possible_moves = set() array = np.zeros((ConfigChess.board_size, ConfigChess.board_size)).astype(\"int8\") for i,", "i, j, piece in product( range(ConfigChess.board_size), range(ConfigChess.board_size), [\"Q\", \"N\"] ):", "range(ConfigChess.board_size), range(ConfigChess.board_size), [\"Q\", \"N\"] ): array[i][j] = Board.piece_symbol_to_int(piece) all_possible_moves.update( set(map(lambda", "Board.piece_symbol_to_int(\"P\") all_possible_moves.update( set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves)) ) array[0, :] =", "for i, j, piece in product( range(ConfigChess.board_size), range(ConfigChess.board_size), [\"Q\", \"N\"]", ") array[0, :] = Board.piece_symbol_to_int(\"p\") all_possible_moves.update( set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves))", "be added with queen moves under UCI notation return sorted(list(all_possible_moves))", "= np.zeros((ConfigChess.board_size, ConfigChess.board_size)).astype(\"int8\") for i, j, piece in product( range(ConfigChess.board_size),", "0 # underpromotion moves array[1, :] = Board.piece_symbol_to_int(\"P\") all_possible_moves.update( set(map(lambda", ") # no need to add castling moves: they have", "from src.chess.board import Board from src.chess.move import Move def get_all_possible_moves()", "import product from typing import List from src.config import ConfigChess", "ConfigChess.board_size)).astype(\"int8\") for i, j, piece in product( range(ConfigChess.board_size), range(ConfigChess.board_size), [\"Q\",", "set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves)) ) # no need to add", "product( range(ConfigChess.board_size), range(ConfigChess.board_size), [\"Q\", \"N\"] ): array[i][j] = Board.piece_symbol_to_int(piece) all_possible_moves.update(", "= Board.piece_symbol_to_int(\"P\") all_possible_moves.update( set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves)) ) array[0, :]", "= Board.piece_symbol_to_int(piece) all_possible_moves.update( set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves)) ) array[i][j] =", "array[0, :] = Board.piece_symbol_to_int(\"p\") all_possible_moves.update( set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves)) )", "Board.piece_symbol_to_int(piece) all_possible_moves.update( set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves)) ) array[i][j] = 0", "def get_all_possible_moves() -> List[Move]: all_possible_moves = set() array = np.zeros((ConfigChess.board_size,", "src.config import ConfigChess from 
src.chess.board import Board from src.chess.move import", "= set() array = np.zeros((ConfigChess.board_size, ConfigChess.board_size)).astype(\"int8\") for i, j, piece", "np from itertools import product from typing import List from", "from src.chess.move import Move def get_all_possible_moves() -> List[Move]: all_possible_moves =", "typing import List from src.config import ConfigChess from src.chess.board import", "piece in product( range(ConfigChess.board_size), range(ConfigChess.board_size), [\"Q\", \"N\"] ): array[i][j] =", "Move def get_all_possible_moves() -> List[Move]: all_possible_moves = set() array =", "ConfigChess from src.chess.board import Board from src.chess.move import Move def", "Board from src.chess.move import Move def get_all_possible_moves() -> List[Move]: all_possible_moves", "from src.config import ConfigChess from src.chess.board import Board from src.chess.move", "already be added with queen moves under UCI notation return", "import Move def get_all_possible_moves() -> List[Move]: all_possible_moves = set() array", "set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves)) ) array[i][j] = 0 # underpromotion", "move: Move(uci=move.uci()), Board(array=array).legal_moves)) ) array[0, :] = Board.piece_symbol_to_int(\"p\") all_possible_moves.update( set(map(lambda", "): array[i][j] = Board.piece_symbol_to_int(piece) all_possible_moves.update( set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves)) )", ") array[i][j] = 0 # underpromotion moves array[1, :] =", "np.zeros((ConfigChess.board_size, ConfigChess.board_size)).astype(\"int8\") for i, j, piece in product( range(ConfigChess.board_size), range(ConfigChess.board_size),", "Move(uci=move.uci()), Board(array=array).legal_moves)) ) # no need to add castling moves:", "add castling moves: they have already be added with queen", "to add castling moves: they have already be added with", ":] = Board.piece_symbol_to_int(\"p\") all_possible_moves.update( set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves)) ) #", "import numpy as np from itertools import product from typing", "in product( range(ConfigChess.board_size), range(ConfigChess.board_size), [\"Q\", \"N\"] ): array[i][j] = Board.piece_symbol_to_int(piece)", "Board(array=array).legal_moves)) ) # no need to add castling moves: they", "as np from itertools import product from typing import List", "move: Move(uci=move.uci()), Board(array=array).legal_moves)) ) # no need to add castling", "array[i][j] = Board.piece_symbol_to_int(piece) all_possible_moves.update( set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves)) ) array[i][j]", "array[i][j] = 0 # underpromotion moves array[1, :] = Board.piece_symbol_to_int(\"P\")", "all_possible_moves.update( set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves)) ) array[0, :] = Board.piece_symbol_to_int(\"p\")", "castling moves: they have already be added with queen moves", "Board(array=array).legal_moves)) ) array[i][j] = 0 # underpromotion moves array[1, :]", "import Board from src.chess.move import Move def get_all_possible_moves() -> List[Move]:", "# no need to add castling moves: they have already", "get_all_possible_moves() -> List[Move]: all_possible_moves = set() array = np.zeros((ConfigChess.board_size, ConfigChess.board_size)).astype(\"int8\")", "range(ConfigChess.board_size), [\"Q\", \"N\"] ): array[i][j] = Board.piece_symbol_to_int(piece) all_possible_moves.update( set(map(lambda move:", "from itertools import product from 
typing import List from src.config", "import ConfigChess from src.chess.board import Board from src.chess.move import Move", "set() array = np.zeros((ConfigChess.board_size, ConfigChess.board_size)).astype(\"int8\") for i, j, piece in", "Board.piece_symbol_to_int(\"p\") all_possible_moves.update( set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves)) ) # no need", "they have already be added with queen moves under UCI", "src.chess.move import Move def get_all_possible_moves() -> List[Move]: all_possible_moves = set()", "List from src.config import ConfigChess from src.chess.board import Board from", "j, piece in product( range(ConfigChess.board_size), range(ConfigChess.board_size), [\"Q\", \"N\"] ): array[i][j]", "underpromotion moves array[1, :] = Board.piece_symbol_to_int(\"P\") all_possible_moves.update( set(map(lambda move: Move(uci=move.uci()),", "no need to add castling moves: they have already be", "have already be added with queen moves under UCI notation", "Move(uci=move.uci()), Board(array=array).legal_moves)) ) array[i][j] = 0 # underpromotion moves array[1,", "# underpromotion moves array[1, :] = Board.piece_symbol_to_int(\"P\") all_possible_moves.update( set(map(lambda move:", "itertools import product from typing import List from src.config import", "move: Move(uci=move.uci()), Board(array=array).legal_moves)) ) array[i][j] = 0 # underpromotion moves", "product from typing import List from src.config import ConfigChess from", "all_possible_moves.update( set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves)) ) # no need to", "moves: they have already be added with queen moves under", "moves array[1, :] = Board.piece_symbol_to_int(\"P\") all_possible_moves.update( set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves))", "import List from src.config import ConfigChess from src.chess.board import Board", "\"N\"] ): array[i][j] = Board.piece_symbol_to_int(piece) all_possible_moves.update( set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves))", "src.chess.board import Board from src.chess.move import Move def get_all_possible_moves() ->", "Board(array=array).legal_moves)) ) array[0, :] = Board.piece_symbol_to_int(\"p\") all_possible_moves.update( set(map(lambda move: Move(uci=move.uci()),", "need to add castling moves: they have already be added", "set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves)) ) array[0, :] = Board.piece_symbol_to_int(\"p\") all_possible_moves.update(", "= Board.piece_symbol_to_int(\"p\") all_possible_moves.update( set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves)) ) # no", "numpy as np from itertools import product from typing import", "from typing import List from src.config import ConfigChess from src.chess.board", "-> List[Move]: all_possible_moves = set() array = np.zeros((ConfigChess.board_size, ConfigChess.board_size)).astype(\"int8\") for", "= 0 # underpromotion moves array[1, :] = Board.piece_symbol_to_int(\"P\") all_possible_moves.update(" ]
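The row above shingles a chess move-enumeration helper. The sketch below reassembles it for readability; it is not runnable on its own because it depends on that repository's internal ConfigChess, Board and Move wrappers (assumed here to expose piece_symbol_to_int, legal_moves and a uci keyword, as the shingles suggest).

# Reassembled sketch; relies on the project's own src.config / src.chess modules.
from itertools import product
from typing import List

import numpy as np

from src.config import ConfigChess
from src.chess.board import Board
from src.chess.move import Move


def get_all_possible_moves() -> List[Move]:
    all_possible_moves = set()
    array = np.zeros((ConfigChess.board_size, ConfigChess.board_size)).astype("int8")
    # Queen and knight moves from every square cover all sliding and
    # leaping moves in UCI notation.
    for i, j, piece in product(
        range(ConfigChess.board_size), range(ConfigChess.board_size), ["Q", "N"]
    ):
        array[i][j] = Board.piece_symbol_to_int(piece)
        all_possible_moves.update(
            set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves))
        )
        array[i][j] = 0
    # Underpromotion moves: fill a rank with pawns of each color.
    array[1, :] = Board.piece_symbol_to_int("P")
    all_possible_moves.update(
        set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves))
    )
    array[0, :] = Board.piece_symbol_to_int("p")
    all_possible_moves.update(
        set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves))
    )
    # No need to add castling moves: they have already been added with
    # the queen moves under UCI notation.
    return sorted(list(all_possible_moves))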
[ "self.y1 + scaled_effort # Don't let the vehcicle fall through", "Args: effort (float): related to the upward thrust of the", "<= 0 and net_acceleration < 0: y0dot = 0 y1dot", "net_acceleration self.y0 += y0dot * self.dt self.y1 += y1dot *", "self.y0 += gauss(0, self.noise) return self.y0, self.y1 def get_altitude(self): \"\"\"Return", "gauss(0, self.noise) return self.y0, self.y1 def get_altitude(self): \"\"\"Return the current", "the vehicle emc (float): electromechanical constant for the vehicle dt", "upward thrust of the vehicle, it must be >= 0", "= 0 y1dot = 0 else: y0dot = self.y1 y1dot", "* self.y1 + scaled_effort # Don't let the vehcicle fall", "the vehcicle fall through the ground if self.y0 <= 0", "the ground if self.y0 <= 0 and net_acceleration < 0:", "\"\"\" effort = max(0, effort) scaled_effort = self.emc / self.mass", "multirotor vehicle.\"\"\" GRAVITY = -9.81 def __init__( self, altitude=10, velocity=0,", "of the vehicle, it must be >= 0 Return: The", "= -9.81 def __init__( self, altitude=10, velocity=0, mass=1.54, emc=10.0, dt=0.05,", "self.emc / self.mass * effort net_acceleration = MultiRotor.GRAVITY - 0.75", "net_acceleration < 0: y0dot = 0 y1dot = 0 else:", "The current state (altitude, velocity) of the vehicle. \"\"\" effort", "= altitude self.y1 = velocity self.mass = mass self.emc =", "for the vehicle dt (float): simulation time step noise (float):", "the current altitude.\"\"\" return self.y0 def get_delta_time(self): \"\"\"Return the simulation", "altitude=10, velocity=0, mass=1.54, emc=10.0, dt=0.05, noise=0.1 ): \"\"\" Args: altitude", "vehicle mass (float): mass of the vehicle emc (float): electromechanical", "mass of the vehicle emc (float): electromechanical constant for the", "and apply motor forces. Args: effort (float): related to the", "mass self.emc = emc self.dt = dt self.noise = noise", "noise def step(self, effort): \"\"\"Advance the multirotor simulation and apply", "= max(0, effort) scaled_effort = self.emc / self.mass * effort", "def __init__( self, altitude=10, velocity=0, mass=1.54, emc=10.0, dt=0.05, noise=0.1 ):", "vertical dynamics for a multirotor vehicle.\"\"\" GRAVITY = -9.81 def", "and net_acceleration < 0: y0dot = 0 y1dot = 0", "= velocity self.mass = mass self.emc = emc self.dt =", "altitude (float): initial altitude of the vehicle velocity (float): initial", "0 else: y0dot = self.y1 y1dot = net_acceleration self.y0 +=", "= emc self.dt = dt self.noise = noise def step(self,", "apply motor forces. Args: effort (float): related to the upward", "vehicle. 
\"\"\" effort = max(0, effort) scaled_effort = self.emc /", "emc (float): electromechanical constant for the vehicle dt (float): simulation", "noise \"\"\" self.y0 = altitude self.y1 = velocity self.mass =", "dt self.noise = noise def step(self, effort): \"\"\"Advance the multirotor", "GRAVITY = -9.81 def __init__( self, altitude=10, velocity=0, mass=1.54, emc=10.0,", "get_altitude(self): \"\"\"Return the current altitude.\"\"\" return self.y0 def get_delta_time(self): \"\"\"Return", "the vehicle dt (float): simulation time step noise (float): standard", "the upward thrust of the vehicle, it must be >=", "): \"\"\" Args: altitude (float): initial altitude of the vehicle", "velocity (float): initial velocity of the vehicle mass (float): mass", "effort = max(0, effort) scaled_effort = self.emc / self.mass *", "self.y0 += y0dot * self.dt self.y1 += y1dot * self.dt", "noise (float): standard deviation of normally distributed simulation noise \"\"\"", "\"\"\"Advance the multirotor simulation and apply motor forces. Args: effort", "ground if self.y0 <= 0 and net_acceleration < 0: y0dot", "simulation time step noise (float): standard deviation of normally distributed", "vehcicle fall through the ground if self.y0 <= 0 and", "y0dot * self.dt self.y1 += y1dot * self.dt self.y0 +=", "of normally distributed simulation noise \"\"\" self.y0 = altitude self.y1", "gauss class MultiRotor: \"\"\"Simple vertical dynamics for a multirotor vehicle.\"\"\"", "related to the upward thrust of the vehicle, it must", "(float): simulation time step noise (float): standard deviation of normally", "multirotor simulation and apply motor forces. Args: effort (float): related", "def get_altitude(self): \"\"\"Return the current altitude.\"\"\" return self.y0 def get_delta_time(self):", "(float): related to the upward thrust of the vehicle, it", "standard deviation of normally distributed simulation noise \"\"\" self.y0 =", "max(0, effort) scaled_effort = self.emc / self.mass * effort net_acceleration", "noise=0.1 ): \"\"\" Args: altitude (float): initial altitude of the", "= self.emc / self.mass * effort net_acceleration = MultiRotor.GRAVITY -", "random import gauss class MultiRotor: \"\"\"Simple vertical dynamics for a", "y1dot * self.dt self.y0 += gauss(0, self.noise) return self.y0, self.y1", "= MultiRotor.GRAVITY - 0.75 * self.y1 + scaled_effort # Don't", "/ self.mass * effort net_acceleration = MultiRotor.GRAVITY - 0.75 *", "class MultiRotor: \"\"\"Simple vertical dynamics for a multirotor vehicle.\"\"\" GRAVITY", "self.emc = emc self.dt = dt self.noise = noise def", "\"\"\"Simple vertical dynamics for a multirotor vehicle.\"\"\" GRAVITY = -9.81", "(float): standard deviation of normally distributed simulation noise \"\"\" self.y0", "0 y1dot = 0 else: y0dot = self.y1 y1dot =", "self.y0, self.y1 def get_altitude(self): \"\"\"Return the current altitude.\"\"\" return self.y0", "self.y0 <= 0 and net_acceleration < 0: y0dot = 0", "(float): initial velocity of the vehicle mass (float): mass of", "self.dt self.y0 += gauss(0, self.noise) return self.y0, self.y1 def get_altitude(self):", "mass=1.54, emc=10.0, dt=0.05, noise=0.1 ): \"\"\" Args: altitude (float): initial", "the vehicle velocity (float): initial velocity of the vehicle mass", "(float): mass of the vehicle emc (float): electromechanical constant for", "time step noise (float): standard deviation of normally distributed simulation", "MultiRotor.GRAVITY - 0.75 * self.y1 + scaled_effort # Don't let", "the vehicle, it must be >= 0 Return: The current", "self.y1 += 
y1dot * self.dt self.y0 += gauss(0, self.noise) return", "# Don't let the vehcicle fall through the ground if", "current altitude.\"\"\" return self.y0 def get_delta_time(self): \"\"\"Return the simulation time", "effort): \"\"\"Advance the multirotor simulation and apply motor forces. Args:", "self.mass * effort net_acceleration = MultiRotor.GRAVITY - 0.75 * self.y1", "(float): electromechanical constant for the vehicle dt (float): simulation time", "0.75 * self.y1 + scaled_effort # Don't let the vehcicle", "altitude self.y1 = velocity self.mass = mass self.emc = emc", "__init__( self, altitude=10, velocity=0, mass=1.54, emc=10.0, dt=0.05, noise=0.1 ): \"\"\"", "vehicle emc (float): electromechanical constant for the vehicle dt (float):", "+= gauss(0, self.noise) return self.y0, self.y1 def get_altitude(self): \"\"\"Return the", "vehicle velocity (float): initial velocity of the vehicle mass (float):", "vehicle.\"\"\" GRAVITY = -9.81 def __init__( self, altitude=10, velocity=0, mass=1.54,", "distributed simulation noise \"\"\" self.y0 = altitude self.y1 = velocity", "altitude.\"\"\" return self.y0 def get_delta_time(self): \"\"\"Return the simulation time step.\"\"\"", "step(self, effort): \"\"\"Advance the multirotor simulation and apply motor forces.", "< 0: y0dot = 0 y1dot = 0 else: y0dot", "dynamics for a multirotor vehicle.\"\"\" GRAVITY = -9.81 def __init__(", "Return: The current state (altitude, velocity) of the vehicle. \"\"\"", "else: y0dot = self.y1 y1dot = net_acceleration self.y0 += y0dot", "forces. Args: effort (float): related to the upward thrust of", "self.y0 def get_delta_time(self): \"\"\"Return the simulation time step.\"\"\" return self.dt", "y0dot = self.y1 y1dot = net_acceleration self.y0 += y0dot *", "emc=10.0, dt=0.05, noise=0.1 ): \"\"\" Args: altitude (float): initial altitude", "= mass self.emc = emc self.dt = dt self.noise =", "0: y0dot = 0 y1dot = 0 else: y0dot =", "MultiRotor: \"\"\"Simple vertical dynamics for a multirotor vehicle.\"\"\" GRAVITY =", "state (altitude, velocity) of the vehicle. \"\"\" effort = max(0,", "net_acceleration = MultiRotor.GRAVITY - 0.75 * self.y1 + scaled_effort #", "simulation and apply motor forces. Args: effort (float): related to", "y1dot = net_acceleration self.y0 += y0dot * self.dt self.y1 +=", "+= y0dot * self.dt self.y1 += y1dot * self.dt self.y0", "import gauss class MultiRotor: \"\"\"Simple vertical dynamics for a multirotor", "\"\"\" self.y0 = altitude self.y1 = velocity self.mass = mass", "constant for the vehicle dt (float): simulation time step noise", "current state (altitude, velocity) of the vehicle. 
\"\"\" effort =", "effort) scaled_effort = self.emc / self.mass * effort net_acceleration =", "must be >= 0 Return: The current state (altitude, velocity)", "be >= 0 Return: The current state (altitude, velocity) of", "vehicle dt (float): simulation time step noise (float): standard deviation", "if self.y0 <= 0 and net_acceleration < 0: y0dot =", "normally distributed simulation noise \"\"\" self.y0 = altitude self.y1 =", "through the ground if self.y0 <= 0 and net_acceleration <", "y1dot = 0 else: y0dot = self.y1 y1dot = net_acceleration", "electromechanical constant for the vehicle dt (float): simulation time step", "self.dt = dt self.noise = noise def step(self, effort): \"\"\"Advance", "mass (float): mass of the vehicle emc (float): electromechanical constant", "y0dot = 0 y1dot = 0 else: y0dot = self.y1", "scaled_effort = self.emc / self.mass * effort net_acceleration = MultiRotor.GRAVITY", "velocity) of the vehicle. \"\"\" effort = max(0, effort) scaled_effort", "of the vehicle emc (float): electromechanical constant for the vehicle", "self.noise = noise def step(self, effort): \"\"\"Advance the multirotor simulation", "self.noise) return self.y0, self.y1 def get_altitude(self): \"\"\"Return the current altitude.\"\"\"", "self.mass = mass self.emc = emc self.dt = dt self.noise", "= noise def step(self, effort): \"\"\"Advance the multirotor simulation and", "of the vehicle mass (float): mass of the vehicle emc", "effort net_acceleration = MultiRotor.GRAVITY - 0.75 * self.y1 + scaled_effort", "self.y1 y1dot = net_acceleration self.y0 += y0dot * self.dt self.y1", "of the vehicle velocity (float): initial velocity of the vehicle", "emc self.dt = dt self.noise = noise def step(self, effort):", "= dt self.noise = noise def step(self, effort): \"\"\"Advance the", "* self.dt self.y0 += gauss(0, self.noise) return self.y0, self.y1 def", "thrust of the vehicle, it must be >= 0 Return:", "def step(self, effort): \"\"\"Advance the multirotor simulation and apply motor", "\"\"\"Return the current altitude.\"\"\" return self.y0 def get_delta_time(self): \"\"\"Return the", "Args: altitude (float): initial altitude of the vehicle velocity (float):", "return self.y0 def get_delta_time(self): \"\"\"Return the simulation time step.\"\"\" return", "self.y1 def get_altitude(self): \"\"\"Return the current altitude.\"\"\" return self.y0 def", "Don't let the vehcicle fall through the ground if self.y0", "dt=0.05, noise=0.1 ): \"\"\" Args: altitude (float): initial altitude of", "= self.y1 y1dot = net_acceleration self.y0 += y0dot * self.dt", "let the vehcicle fall through the ground if self.y0 <=", "-9.81 def __init__( self, altitude=10, velocity=0, mass=1.54, emc=10.0, dt=0.05, noise=0.1", "fall through the ground if self.y0 <= 0 and net_acceleration", "velocity of the vehicle mass (float): mass of the vehicle", "self.y1 = velocity self.mass = mass self.emc = emc self.dt", "motor forces. Args: effort (float): related to the upward thrust", "* effort net_acceleration = MultiRotor.GRAVITY - 0.75 * self.y1 +", "to the upward thrust of the vehicle, it must be", "scaled_effort # Don't let the vehcicle fall through the ground", "return self.y0, self.y1 def get_altitude(self): \"\"\"Return the current altitude.\"\"\" return", "a multirotor vehicle.\"\"\" GRAVITY = -9.81 def __init__( self, altitude=10,", "(altitude, velocity) of the vehicle. 
\"\"\" effort = max(0, effort)", "velocity self.mass = mass self.emc = emc self.dt = dt", "deviation of normally distributed simulation noise \"\"\" self.y0 = altitude", "self.dt self.y1 += y1dot * self.dt self.y0 += gauss(0, self.noise)", "from random import gauss class MultiRotor: \"\"\"Simple vertical dynamics for", "for a multirotor vehicle.\"\"\" GRAVITY = -9.81 def __init__( self,", "simulation noise \"\"\" self.y0 = altitude self.y1 = velocity self.mass", "* self.dt self.y1 += y1dot * self.dt self.y0 += gauss(0,", "the vehicle. \"\"\" effort = max(0, effort) scaled_effort = self.emc", "(float): initial altitude of the vehicle velocity (float): initial velocity", "0 and net_acceleration < 0: y0dot = 0 y1dot =", "step noise (float): standard deviation of normally distributed simulation noise", "altitude of the vehicle velocity (float): initial velocity of the", "= 0 else: y0dot = self.y1 y1dot = net_acceleration self.y0", "0 Return: The current state (altitude, velocity) of the vehicle.", "velocity=0, mass=1.54, emc=10.0, dt=0.05, noise=0.1 ): \"\"\" Args: altitude (float):", "self.y0 = altitude self.y1 = velocity self.mass = mass self.emc", "initial velocity of the vehicle mass (float): mass of the", "it must be >= 0 Return: The current state (altitude,", "self, altitude=10, velocity=0, mass=1.54, emc=10.0, dt=0.05, noise=0.1 ): \"\"\" Args:", "dt (float): simulation time step noise (float): standard deviation of", "= net_acceleration self.y0 += y0dot * self.dt self.y1 += y1dot", "\"\"\" Args: altitude (float): initial altitude of the vehicle velocity", "the multirotor simulation and apply motor forces. Args: effort (float):", "vehicle, it must be >= 0 Return: The current state", "of the vehicle. \"\"\" effort = max(0, effort) scaled_effort =", "- 0.75 * self.y1 + scaled_effort # Don't let the", "+= y1dot * self.dt self.y0 += gauss(0, self.noise) return self.y0,", ">= 0 Return: The current state (altitude, velocity) of the", "effort (float): related to the upward thrust of the vehicle,", "initial altitude of the vehicle velocity (float): initial velocity of", "the vehicle mass (float): mass of the vehicle emc (float):", "+ scaled_effort # Don't let the vehcicle fall through the" ]
[ ") -> Union[Dict[str, Any], List[Any]]: return self.request('delete', endpoint, data) def", "SignatureValidationError, StpmexException, ) from .resources import CuentaFisica, Orden, Resource, Saldo", "invalido', desc): raise InvalidField(**resp) elif id == 3 and desc", "import UnsupportedAlgorithm from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import serialization", "): raise ClaveRastreoAlreadyInUse(**resp['resultado']) elif id == -7 and re.match(r'La cuenta", "invalido': raise InvalidRfcOrCurp(**resp) elif id == 1 and re.match(r'El campo", "# Some responses are enveloped resultado = resultado['resultado'] return resultado", "Any] ) -> Union[Dict[str, Any], List[Any]]: return self.request('delete', endpoint, data)", "elif id == 0 and re.match(r'El campo .+ es obligatorio',", "and 'Se rechaza por PLD' in error: raise PldRejected(**resp['resultado']) else:", "Response) -> None: if not response.ok: response.raise_for_status() resp = response.json()", "data: Dict[str, Any] ) -> Union[Dict[str, Any], List[Any]]: return self.request('delete',", "data) def request( self, method: str, endpoint: str, data: Dict[str,", "self.request('post', endpoint, data) def put( self, endpoint: str, data: Dict[str,", "response.json() if isinstance(resp, dict): try: _raise_description_error_exc(resp) except KeyError: ... try:", "except (AssertionError, KeyError): ... response.raise_for_status() def _raise_description_error_exc(resp: Dict) -> NoReturn:", "): self.timeout = timeout self.session = Session() self.session.headers['User-Agent'] = f'stpmex-python/{client_version}'", "validando la firma': raise SignatureValidationError(**resp['resultado']) elif id == 0 and", "obligatorio', error): raise MandatoryField(**resp['resultado']) elif id == -1 and re.match(", "NoReturn, Union from cryptography.exceptions import UnsupportedAlgorithm from cryptography.hazmat.backends import default_backend", "id == 3 and desc == 'Cuenta Duplicada': raise DuplicatedAccount(**resp)", "not response.ok: response.raise_for_status() resp = response.json() if isinstance(resp, dict): try:", "return self.request('post', endpoint, data) def put( self, endpoint: str, data:", "elif id == -9 and re.match(r'La Institucion \\d+ no es", "priv_key_passphrase: str, demo: bool = False, base_url: str = None,", "Resource._client = self def post( self, endpoint: str, data: Dict[str,", "re.match(r'El campo \\w+ es invalido', desc): raise InvalidField(**resp) elif id", "-22 and 'no coincide para la institucion operante' in error:", "raise AccountDoesNotExist(**resp['resultado']) elif id == -9 and re.match(r'La Institucion \\d+", "= resp['resultado']['descripcionError'] if id == 0 and error == 'No", "data) def put( self, endpoint: str, data: Dict[str, Any] )", "str, priv_key_passphrase: str, demo: bool = False, base_url: str =", "import default_backend from cryptography.hazmat.primitives import serialization from requests import Response,", "'No se recibió respuesta del servicio': raise NoServiceResponse(**resp['resultado']) elif id", "and re.match(r'El campo .+ es obligatorio', error): raise MandatoryField(**resp['resultado']) elif", "in error: raise BankCodeClabeMismatch(**resp['resultado']) elif id == -24 and re.match(r'Cuenta", "and error.startswith('No se encontr'): raise NoOrdenesEncontradas elif id == -200", "(AssertionError, KeyError): ... 
response.raise_for_status() def _raise_description_error_exc(resp: Dict) -> NoReturn: id", ") except (ValueError, TypeError, UnsupportedAlgorithm): raise InvalidPassphrase Resource.empresa = empresa", "re.match(r'Cuenta {\\d+} - {MISMA_CUENTA}', error): raise SameAccount(**resp['resultado']) elif id ==", "CuentaFisica ordenes: ClassVar = Orden saldos: ClassVar = Saldo def", "== 5 and re.match(r'El campo .* obligatorio \\w+', desc): raise", "empresa Resource._client = self def post( self, endpoint: str, data:", "host_url = DEMO_HOST self.session.verify = False else: host_url = PROD_HOST", "Any ) -> Union[Dict[str, Any], List[Any]]: url = self.base_url +", "fue utilizada', error ): raise ClaveRastreoAlreadyInUse(**resp['resultado']) elif id == -7", "No se levanta excepción porque # todas las cuentas pasan", "error): raise AccountDoesNotExist(**resp['resultado']) elif id == -9 and re.match(r'La Institucion", "= False else: host_url = PROD_HOST self.session.verify = True self.base_url", "Resource, Saldo from .version import __version__ as client_version DEMO_HOST =", "campo .+ es obligatorio', error): raise MandatoryField(**resp['resultado']) elif id ==", "la institucion operante' in error: raise BankCodeClabeMismatch(**resp['resultado']) elif id ==", "else: raise StpmexException(**resp['resultado']) def _raise_description_exc(resp: Dict) -> NoReturn: id =", "Any] ) -> Union[Dict[str, Any], List[Any]]: return self.request('put', endpoint, data)", "timeout self.session = Session() self.session.headers['User-Agent'] = f'stpmex-python/{client_version}' if demo: host_url", "DuplicatedAccount(**resp) elif id == 5 and re.match(r'El campo .* obligatorio", "requests import Response, Session from .exc import ( AccountDoesNotExist, BankCodeClabeMismatch,", "\\d+ es invalido', error): raise InvalidAccountType(**resp['resultado']) elif id == -20", ".+ es obligatorio', error): raise MandatoryField(**resp['resultado']) elif id == -1", "resp['descripcion'] if id == 0 and 'Cuenta en revisión' in", "f'stpmex-python/{client_version}' if demo: host_url = DEMO_HOST self.session.verify = False else:", "serialization.load_pem_private_key( priv_key.encode('utf-8'), priv_key_passphrase.encode('ascii'), default_backend(), ) except (ValueError, TypeError, UnsupportedAlgorithm): raise", "campo .* obligatorio \\w+', desc): raise MandatoryField(**resp) else: raise StpmexException(**resp)", "elif id == -7 and re.match(r'La cuenta .+ no existe',", "no es valida', error): raise InvalidInstitution(**resp['resultado']) elif id == -11", "== -24 and re.match(r'Cuenta {\\d+} - {MISMA_CUENTA}', error): raise SameAccount(**resp['resultado'])", "id = resp['id'] desc = resp['descripcion'] if id == 0", "= empresa Resource._client = self def post( self, endpoint: str,", "Resource.empresa = empresa Resource._client = self def post( self, endpoint:", "and re.match(r'Cuenta {\\d+} - {MISMA_CUENTA}', error): raise SameAccount(**resp['resultado']) elif id", "error: raise InvalidTrackingKey(**resp['resultado']) elif id == -100 and error.startswith('No se", "BankCodeClabeMismatch, ClaveRastreoAlreadyInUse, DuplicatedAccount, InvalidAccountType, InvalidAmount, InvalidField, InvalidInstitution, InvalidPassphrase, InvalidRfcOrCurp, InvalidTrackingKey,", "self.session.verify = False else: host_url = PROD_HOST self.session.verify = True", "Saldo def __init__( self, empresa: str, priv_key: str, priv_key_passphrase: str,", "resultado = resultado['resultado'] return resultado @staticmethod def _check_response(response: Response) ->", "= Orden saldos: 
ClassVar = Saldo def __init__( self, empresa:", "__init__( self, empresa: str, priv_key: str, priv_key_passphrase: str, demo: bool", "try: self.pkey = serialization.load_pem_private_key( priv_key.encode('utf-8'), priv_key_passphrase.encode('ascii'), default_backend(), ) except (ValueError,", "data: Dict[str, Any] ) -> Union[Dict[str, Any], List[Any]]: return self.request('put',", "= resp['descripcion'] if id == 0 and 'Cuenta en revisión'", "ClassVar = CuentaFisica ordenes: ClassVar = Orden saldos: ClassVar =", "error): raise SameAccount(**resp['resultado']) elif id == -34 and 'Clave rastreo", "= Saldo def __init__( self, empresa: str, priv_key: str, priv_key_passphrase:", "str, priv_key: str, priv_key_passphrase: str, demo: bool = False, base_url:", "or f'{host_url}/speiws/rest' self.soap_url = ( soap_url or f'{host_url}/spei/webservices/SpeiConsultaServices' ) try:", "and re.match(r'El campo \\w+ es invalido', desc): raise InvalidField(**resp) elif", "InvalidAccountType(**resp['resultado']) elif id == -20 and re.match(r'El monto {.+} no", "elif id == 1 and desc == 'rfc/curp invalido': raise", "resultado @staticmethod def _check_response(response: Response) -> None: if not response.ok:", "== 0 and re.match(r'El campo .+ es obligatorio', error): raise", "institucion operante' in error: raise BankCodeClabeMismatch(**resp['resultado']) elif id == -24", "raise InvalidAmount(**resp['resultado']) elif id == -22 and 'no coincide para", "StpmexException, ) from .resources import CuentaFisica, Orden, Resource, Saldo from", "error): raise InvalidAmount(**resp['resultado']) elif id == -22 and 'no coincide", "put( self, endpoint: str, data: Dict[str, Any] ) -> Union[Dict[str,", "SameAccount, SignatureValidationError, StpmexException, ) from .resources import CuentaFisica, Orden, Resource,", "una cuenta. 
No se levanta excepción porque # todas las", "error ): raise ClaveRastreoAlreadyInUse(**resp['resultado']) elif id == -7 and re.match(r'La", "@staticmethod def _check_response(response: Response) -> None: if not response.ok: response.raise_for_status()", ") -> Union[Dict[str, Any], List[Any]]: url = self.base_url + endpoint", "id = resp['resultado']['id'] error = resp['resultado']['descripcionError'] if id == 0", "error = resp['resultado']['descripcionError'] if id == 0 and error ==", "and 'no coincide para la institucion operante' in error: raise", "= response.json() if isinstance(resp, dict): try: _raise_description_error_exc(resp) except KeyError: ...", "invalido', error): raise InvalidAccountType(**resp['resultado']) elif id == -20 and re.match(r'El", "id == -11 and re.match(r'El tipo de cuenta \\d+ es", "endpoint, data) def put( self, endpoint: str, data: Dict[str, Any]", "in error: raise InvalidTrackingKey(**resp['resultado']) elif id == -100 and error.startswith('No", "desc = resp['descripcion'] if id == 0 and 'Cuenta en", "-9 and re.match(r'La Institucion \\d+ no es valida', error): raise", "\\d+ no es valida', error): raise InvalidInstitution(**resp['resultado']) elif id ==", "id == 5 and re.match(r'El campo .* obligatorio \\w+', desc):", "recibió respuesta del servicio': raise NoServiceResponse(**resp['resultado']) elif id == 0", "cuenta \\d+ es invalido', error): raise InvalidAccountType(**resp['resultado']) elif id ==", "raise InvalidRfcOrCurp(**resp) elif id == 1 and re.match(r'El campo \\w+", "response = self.session.request( method, url, json=data, timeout=self.timeout, **kwargs, ) self._check_response(response)", "base_url: str = None, soap_url: str = None, timeout: tuple", "por este status. ... elif id == 1 and desc", "if id == 0 and error == 'No se recibió", "and error == 'Error validando la firma': raise SignatureValidationError(**resp['resultado']) elif", "InvalidTrackingKey, MandatoryField, NoOrdenesEncontradas, NoServiceResponse, PldRejected, SameAccount, SignatureValidationError, StpmexException, ) from", "__version__ as client_version DEMO_HOST = 'https://demo.stpmex.com:7024' PROD_HOST = 'https://prod.stpmex.com' class", "default_backend from cryptography.hazmat.primitives import serialization from requests import Response, Session", "es invalido', desc): raise InvalidField(**resp) elif id == 3 and", "cryptography.hazmat.primitives import serialization from requests import Response, Session from .exc", "las cuentas pasan por este status. ... elif id ==", "KeyError: ... try: assert resp['descripcion'] _raise_description_exc(resp) except (AssertionError, KeyError): ...", "Any], List[Any]]: return self.request('delete', endpoint, data) def request( self, method:", "data: Dict[str, Any] ) -> Union[Dict[str, Any], List[Any]]: return self.request('post',", "elif id == 3 and desc == 'Cuenta Duplicada': raise", "= resp['id'] desc = resp['descripcion'] if id == 0 and", "r'La clave de rastreo .+ ya fue utilizada', error ):", "data: Dict[str, Any], **kwargs: Any ) -> Union[Dict[str, Any], List[Any]]:", "if 'resultado' in resultado: # Some responses are enveloped resultado", "MandatoryField(**resp['resultado']) elif id == -1 and re.match( r'La clave de", "... try: assert resp['descripcion'] _raise_description_exc(resp) except (AssertionError, KeyError): ... 
response.raise_for_status()", "endpoint response = self.session.request( method, url, json=data, timeout=self.timeout, **kwargs, )", "True self.base_url = base_url or f'{host_url}/speiws/rest' self.soap_url = ( soap_url", "as client_version DEMO_HOST = 'https://demo.stpmex.com:7024' PROD_HOST = 'https://prod.stpmex.com' class Client:", "== 0 and error == 'No se recibió respuesta del", "resources cuentas: ClassVar = CuentaFisica ordenes: ClassVar = Orden saldos:", "try: assert resp['descripcion'] _raise_description_exc(resp) except (AssertionError, KeyError): ... response.raise_for_status() def", "endpoint: str, data: Dict[str, Any], **kwargs: Any ) -> Union[Dict[str,", "raise InvalidField(**resp) elif id == 3 and desc == 'Cuenta", "5 and re.match(r'El campo .* obligatorio \\w+', desc): raise MandatoryField(**resp)", "raise NoOrdenesEncontradas elif id == -200 and 'Se rechaza por", "cuenta. No se levanta excepción porque # todas las cuentas", "( soap_url or f'{host_url}/spei/webservices/SpeiConsultaServices' ) try: self.pkey = serialization.load_pem_private_key( priv_key.encode('utf-8'),", "re from typing import Any, ClassVar, Dict, List, NoReturn, Union", "elif id == -22 and 'no coincide para la institucion", "def post( self, endpoint: str, data: Dict[str, Any] ) ->", "esta respuesta cuando se registra # una cuenta. No se", "= self.base_url + endpoint response = self.session.request( method, url, json=data,", "ordenes: ClassVar = Orden saldos: ClassVar = Saldo def __init__(", "and re.match(r'La cuenta .+ no existe', error): raise AccountDoesNotExist(**resp['resultado']) elif", "NoOrdenesEncontradas, NoServiceResponse, PldRejected, SameAccount, SignatureValidationError, StpmexException, ) from .resources import", "error): raise InvalidAccountType(**resp['resultado']) elif id == -20 and re.match(r'El monto", "= ( soap_url or f'{host_url}/spei/webservices/SpeiConsultaServices' ) try: self.pkey = serialization.load_pem_private_key(", "_check_response(response: Response) -> None: if not response.ok: response.raise_for_status() resp =", "soap_url or f'{host_url}/spei/webservices/SpeiConsultaServices' ) try: self.pkey = serialization.load_pem_private_key( priv_key.encode('utf-8'), priv_key_passphrase.encode('ascii'),", "'no coincide para la institucion operante' in error: raise BankCodeClabeMismatch(**resp['resultado'])", "PldRejected(**resp['resultado']) else: raise StpmexException(**resp['resultado']) def _raise_description_exc(resp: Dict) -> NoReturn: id", "== 'No se recibió respuesta del servicio': raise NoServiceResponse(**resp['resultado']) elif", "re.match( r'La clave de rastreo .+ ya fue utilizada', error", "elif id == -100 and error.startswith('No se encontr'): raise NoOrdenesEncontradas", "InvalidAccountType, InvalidAmount, InvalidField, InvalidInstitution, InvalidPassphrase, InvalidRfcOrCurp, InvalidTrackingKey, MandatoryField, NoOrdenesEncontradas, NoServiceResponse,", "import serialization from requests import Response, Session from .exc import", "coincide para la institucion operante' in error: raise BankCodeClabeMismatch(**resp['resultado']) elif", "def delete( self, endpoint: str, data: Dict[str, Any] ) ->", "se levanta excepción porque # todas las cuentas pasan por", "except KeyError: ... 
try: assert resp['descripcion'] _raise_description_exc(resp) except (AssertionError, KeyError):", "'https://demo.stpmex.com:7024' PROD_HOST = 'https://prod.stpmex.com' class Client: base_url: str soap_url: str", "data) def delete( self, endpoint: str, data: Dict[str, Any] )", "self.request('delete', endpoint, data) def request( self, method: str, endpoint: str,", "cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import serialization from requests import", "_raise_description_error_exc(resp: Dict) -> NoReturn: id = resp['resultado']['id'] error = resp['resultado']['descripcionError']", "client_version DEMO_HOST = 'https://demo.stpmex.com:7024' PROD_HOST = 'https://prod.stpmex.com' class Client: base_url:", "elif id == -24 and re.match(r'Cuenta {\\d+} - {MISMA_CUENTA}', error):", "este status. ... elif id == 1 and desc ==", "Session from .exc import ( AccountDoesNotExist, BankCodeClabeMismatch, ClaveRastreoAlreadyInUse, DuplicatedAccount, InvalidAccountType,", "'rfc/curp invalido': raise InvalidRfcOrCurp(**resp) elif id == 1 and re.match(r'El", "id == 0 and 'Cuenta en revisión' in desc: #", "AccountDoesNotExist(**resp['resultado']) elif id == -9 and re.match(r'La Institucion \\d+ no", "NoOrdenesEncontradas elif id == -200 and 'Se rechaza por PLD'", "False, base_url: str = None, soap_url: str = None, timeout:", "revisión' in desc: # STP regresa esta respuesta cuando se", "rechaza por PLD' in error: raise PldRejected(**resp['resultado']) else: raise StpmexException(**resp['resultado'])", "# una cuenta. No se levanta excepción porque # todas", "id == 0 and error == 'No se recibió respuesta", "List[Any]]: return self.request('put', endpoint, data) def delete( self, endpoint: str,", "str = None, soap_url: str = None, timeout: tuple =", "no es válido', error): raise InvalidAmount(**resp['resultado']) elif id == -22", "response.ok: response.raise_for_status() resp = response.json() if isinstance(resp, dict): try: _raise_description_error_exc(resp)", "1 and re.match(r'El campo \\w+ es invalido', desc): raise InvalidField(**resp)", "def __init__( self, empresa: str, priv_key: str, priv_key_passphrase: str, demo:", "= timeout self.session = Session() self.session.headers['User-Agent'] = f'stpmex-python/{client_version}' if demo:", "cuando se registra # una cuenta. 
No se levanta excepción", "<reponame>cuenca-mx/stpmex-python import re from typing import Any, ClassVar, Dict, List,", "resultado['resultado'] return resultado @staticmethod def _check_response(response: Response) -> None: if", "== -200 and 'Se rechaza por PLD' in error: raise", "ClaveRastreoAlreadyInUse(**resp['resultado']) elif id == -7 and re.match(r'La cuenta .+ no", "Client: base_url: str soap_url: str session: Session # resources cuentas:", "se encontr'): raise NoOrdenesEncontradas elif id == -200 and 'Se", "InvalidField, InvalidInstitution, InvalidPassphrase, InvalidRfcOrCurp, InvalidTrackingKey, MandatoryField, NoOrdenesEncontradas, NoServiceResponse, PldRejected, SameAccount,", "= None, timeout: tuple = None, ): self.timeout = timeout", "self, empresa: str, priv_key: str, priv_key_passphrase: str, demo: bool =", "self.request('put', endpoint, data) def delete( self, endpoint: str, data: Dict[str,", ".+ no existe', error): raise AccountDoesNotExist(**resp['resultado']) elif id == -9", "**kwargs: Any ) -> Union[Dict[str, Any], List[Any]]: url = self.base_url", "in error: raise PldRejected(**resp['resultado']) else: raise StpmexException(**resp['resultado']) def _raise_description_exc(resp: Dict)", "and re.match( r'La clave de rastreo .+ ya fue utilizada',", ".+ ya fue utilizada', error ): raise ClaveRastreoAlreadyInUse(**resp['resultado']) elif id", "= False, base_url: str = None, soap_url: str = None,", "elif id == -20 and re.match(r'El monto {.+} no es", "-24 and re.match(r'Cuenta {\\d+} - {MISMA_CUENTA}', error): raise SameAccount(**resp['resultado']) elif", "= CuentaFisica ordenes: ClassVar = Orden saldos: ClassVar = Saldo", "KeyError): ... response.raise_for_status() def _raise_description_error_exc(resp: Dict) -> NoReturn: id =", "dict): try: _raise_description_error_exc(resp) except KeyError: ... 
try: assert resp['descripcion'] _raise_description_exc(resp)", "-> NoReturn: id = resp['resultado']['id'] error = resp['resultado']['descripcionError'] if id", "from .resources import CuentaFisica, Orden, Resource, Saldo from .version import", "self.session = Session() self.session.headers['User-Agent'] = f'stpmex-python/{client_version}' if demo: host_url =", "None, ): self.timeout = timeout self.session = Session() self.session.headers['User-Agent'] =", "method: str, endpoint: str, data: Dict[str, Any], **kwargs: Any )", "PROD_HOST self.session.verify = True self.base_url = base_url or f'{host_url}/speiws/rest' self.soap_url", "Union[Dict[str, Any], List[Any]]: return self.request('post', endpoint, data) def put( self,", "resultado = response.json() if 'resultado' in resultado: # Some responses", "from cryptography.hazmat.primitives import serialization from requests import Response, Session from", "elif id == -200 and 'Se rechaza por PLD' in", "InvalidPassphrase, InvalidRfcOrCurp, InvalidTrackingKey, MandatoryField, NoOrdenesEncontradas, NoServiceResponse, PldRejected, SameAccount, SignatureValidationError, StpmexException,", "enveloped resultado = resultado['resultado'] return resultado @staticmethod def _check_response(response: Response)", "es valida', error): raise InvalidInstitution(**resp['resultado']) elif id == -11 and", "cuenta .+ no existe', error): raise AccountDoesNotExist(**resp['resultado']) elif id ==", "resp['id'] desc = resp['descripcion'] if id == 0 and 'Cuenta", "# resources cuentas: ClassVar = CuentaFisica ordenes: ClassVar = Orden", "{\\d+} - {MISMA_CUENTA}', error): raise SameAccount(**resp['resultado']) elif id == -34", "raise DuplicatedAccount(**resp) elif id == 5 and re.match(r'El campo .*", "json=data, timeout=self.timeout, **kwargs, ) self._check_response(response) resultado = response.json() if 'resultado'", "status. ... elif id == 1 and desc == 'rfc/curp", "monto {.+} no es válido', error): raise InvalidAmount(**resp['resultado']) elif id", "base_url: str soap_url: str session: Session # resources cuentas: ClassVar", "if not response.ok: response.raise_for_status() resp = response.json() if isinstance(resp, dict):", "response.raise_for_status() resp = response.json() if isinstance(resp, dict): try: _raise_description_error_exc(resp) except", "NoServiceResponse(**resp['resultado']) elif id == 0 and error == 'Error validando", "= PROD_HOST self.session.verify = True self.base_url = base_url or f'{host_url}/speiws/rest'", "respuesta del servicio': raise NoServiceResponse(**resp['resultado']) elif id == 0 and", "_raise_description_exc(resp: Dict) -> NoReturn: id = resp['id'] desc = resp['descripcion']", "TypeError, UnsupportedAlgorithm): raise InvalidPassphrase Resource.empresa = empresa Resource._client = self", "_raise_description_exc(resp) except (AssertionError, KeyError): ... response.raise_for_status() def _raise_description_error_exc(resp: Dict) ->", "base_url or f'{host_url}/speiws/rest' self.soap_url = ( soap_url or f'{host_url}/spei/webservices/SpeiConsultaServices' )", "== -11 and re.match(r'El tipo de cuenta \\d+ es invalido',", "self.base_url = base_url or f'{host_url}/speiws/rest' self.soap_url = ( soap_url or", "in desc: # STP regresa esta respuesta cuando se registra", "porque # todas las cuentas pasan por este status. 
...", "InvalidInstitution, InvalidPassphrase, InvalidRfcOrCurp, InvalidTrackingKey, MandatoryField, NoOrdenesEncontradas, NoServiceResponse, PldRejected, SameAccount, SignatureValidationError,", "(ValueError, TypeError, UnsupportedAlgorithm): raise InvalidPassphrase Resource.empresa = empresa Resource._client =", "MandatoryField, NoOrdenesEncontradas, NoServiceResponse, PldRejected, SameAccount, SignatureValidationError, StpmexException, ) from .resources", "-> None: if not response.ok: response.raise_for_status() resp = response.json() if", "ClassVar, Dict, List, NoReturn, Union from cryptography.exceptions import UnsupportedAlgorithm from", "UnsupportedAlgorithm from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import serialization from", "Union[Dict[str, Any], List[Any]]: return self.request('put', endpoint, data) def delete( self,", "valida', error): raise InvalidInstitution(**resp['resultado']) elif id == -11 and re.match(r'El", "re.match(r'La Institucion \\d+ no es valida', error): raise InvalidInstitution(**resp['resultado']) elif", "raise SignatureValidationError(**resp['resultado']) elif id == 0 and re.match(r'El campo .+", "id == -200 and 'Se rechaza por PLD' in error:", "demo: bool = False, base_url: str = None, soap_url: str", "NoReturn: id = resp['id'] desc = resp['descripcion'] if id ==", "DuplicatedAccount, InvalidAccountType, InvalidAmount, InvalidField, InvalidInstitution, InvalidPassphrase, InvalidRfcOrCurp, InvalidTrackingKey, MandatoryField, NoOrdenesEncontradas,", "AccountDoesNotExist, BankCodeClabeMismatch, ClaveRastreoAlreadyInUse, DuplicatedAccount, InvalidAccountType, InvalidAmount, InvalidField, InvalidInstitution, InvalidPassphrase, InvalidRfcOrCurp,", "= True self.base_url = base_url or f'{host_url}/speiws/rest' self.soap_url = (", "... 
elif id == 1 and desc == 'rfc/curp invalido':", "-7 and re.match(r'La cuenta .+ no existe', error): raise AccountDoesNotExist(**resp['resultado'])", "def _raise_description_exc(resp: Dict) -> NoReturn: id = resp['id'] desc =", "and 'Clave rastreo invalida' in error: raise InvalidTrackingKey(**resp['resultado']) elif id", "'Cuenta en revisión' in desc: # STP regresa esta respuesta", "typing import Any, ClassVar, Dict, List, NoReturn, Union from cryptography.exceptions", "str, data: Dict[str, Any] ) -> Union[Dict[str, Any], List[Any]]: return", "- {MISMA_CUENTA}', error): raise SameAccount(**resp['resultado']) elif id == -34 and", "de rastreo .+ ya fue utilizada', error ): raise ClaveRastreoAlreadyInUse(**resp['resultado'])", "soap_url: str = None, timeout: tuple = None, ): self.timeout", "None, timeout: tuple = None, ): self.timeout = timeout self.session", "error: raise BankCodeClabeMismatch(**resp['resultado']) elif id == -24 and re.match(r'Cuenta {\\d+}", "NoReturn: id = resp['resultado']['id'] error = resp['resultado']['descripcionError'] if id ==", "desc == 'rfc/curp invalido': raise InvalidRfcOrCurp(**resp) elif id == 1", "= Session() self.session.headers['User-Agent'] = f'stpmex-python/{client_version}' if demo: host_url = DEMO_HOST", "error.startswith('No se encontr'): raise NoOrdenesEncontradas elif id == -200 and", "List[Any]]: return self.request('delete', endpoint, data) def request( self, method: str,", "la firma': raise SignatureValidationError(**resp['resultado']) elif id == 0 and re.match(r'El", "-100 and error.startswith('No se encontr'): raise NoOrdenesEncontradas elif id ==", "and error == 'No se recibió respuesta del servicio': raise", "default_backend(), ) except (ValueError, TypeError, UnsupportedAlgorithm): raise InvalidPassphrase Resource.empresa =", "id == -34 and 'Clave rastreo invalida' in error: raise", "{.+} no es válido', error): raise InvalidAmount(**resp['resultado']) elif id ==", "class Client: base_url: str soap_url: str session: Session # resources", "-200 and 'Se rechaza por PLD' in error: raise PldRejected(**resp['resultado'])", "campo \\w+ es invalido', desc): raise InvalidField(**resp) elif id ==", "ClassVar = Orden saldos: ClassVar = Saldo def __init__( self,", "url = self.base_url + endpoint response = self.session.request( method, url,", "== 1 and re.match(r'El campo \\w+ es invalido', desc): raise", "= DEMO_HOST self.session.verify = False else: host_url = PROD_HOST self.session.verify", "rastreo .+ ya fue utilizada', error ): raise ClaveRastreoAlreadyInUse(**resp['resultado']) elif", "Any], List[Any]]: return self.request('post', endpoint, data) def put( self, endpoint:", "Any, ClassVar, Dict, List, NoReturn, Union from cryptography.exceptions import UnsupportedAlgorithm", "timeout: tuple = None, ): self.timeout = timeout self.session =", "empresa: str, priv_key: str, priv_key_passphrase: str, demo: bool = False,", "Any], List[Any]]: return self.request('put', endpoint, data) def delete( self, endpoint:", "-> Union[Dict[str, Any], List[Any]]: return self.request('delete', endpoint, data) def request(", "or f'{host_url}/spei/webservices/SpeiConsultaServices' ) try: self.pkey = serialization.load_pem_private_key( priv_key.encode('utf-8'), priv_key_passphrase.encode('ascii'), default_backend(),", "Any] ) -> Union[Dict[str, Any], List[Any]]: return self.request('post', endpoint, data)", "endpoint, data) def request( self, method: str, endpoint: str, data:", "InvalidAmount(**resp['resultado']) elif id == -22 and 'no coincide para la", "and 
re.match(r'El campo .* obligatorio \\w+', desc): raise MandatoryField(**resp) else:", "self def post( self, endpoint: str, data: Dict[str, Any] )", "es obligatorio', error): raise MandatoryField(**resp['resultado']) elif id == -1 and", "== -1 and re.match( r'La clave de rastreo .+ ya", "InvalidInstitution(**resp['resultado']) elif id == -11 and re.match(r'El tipo de cuenta", "error): raise InvalidInstitution(**resp['resultado']) elif id == -11 and re.match(r'El tipo", "InvalidRfcOrCurp(**resp) elif id == 1 and re.match(r'El campo \\w+ es", "Dict[str, Any] ) -> Union[Dict[str, Any], List[Any]]: return self.request('post', endpoint,", "tuple = None, ): self.timeout = timeout self.session = Session()", "STP regresa esta respuesta cuando se registra # una cuenta.", "-20 and re.match(r'El monto {.+} no es válido', error): raise", "= 'https://prod.stpmex.com' class Client: base_url: str soap_url: str session: Session", "cuentas: ClassVar = CuentaFisica ordenes: ClassVar = Orden saldos: ClassVar", "== -34 and 'Clave rastreo invalida' in error: raise InvalidTrackingKey(**resp['resultado'])", "endpoint: str, data: Dict[str, Any] ) -> Union[Dict[str, Any], List[Any]]:", "error == 'Error validando la firma': raise SignatureValidationError(**resp['resultado']) elif id", "error: raise PldRejected(**resp['resultado']) else: raise StpmexException(**resp['resultado']) def _raise_description_exc(resp: Dict) ->", "== -20 and re.match(r'El monto {.+} no es válido', error):", "DEMO_HOST self.session.verify = False else: host_url = PROD_HOST self.session.verify =", "= None, soap_url: str = None, timeout: tuple = None,", "str, demo: bool = False, base_url: str = None, soap_url:", "válido', error): raise InvalidAmount(**resp['resultado']) elif id == -22 and 'no", "no existe', error): raise AccountDoesNotExist(**resp['resultado']) elif id == -9 and", "elif id == 5 and re.match(r'El campo .* obligatorio \\w+',", "= resultado['resultado'] return resultado @staticmethod def _check_response(response: Response) -> None:", "self.session.request( method, url, json=data, timeout=self.timeout, **kwargs, ) self._check_response(response) resultado =", "False else: host_url = PROD_HOST self.session.verify = True self.base_url =", "( AccountDoesNotExist, BankCodeClabeMismatch, ClaveRastreoAlreadyInUse, DuplicatedAccount, InvalidAccountType, InvalidAmount, InvalidField, InvalidInstitution, InvalidPassphrase,", "del servicio': raise NoServiceResponse(**resp['resultado']) elif id == 0 and error", "-> Union[Dict[str, Any], List[Any]]: url = self.base_url + endpoint response", "request( self, method: str, endpoint: str, data: Dict[str, Any], **kwargs:", "raise ClaveRastreoAlreadyInUse(**resp['resultado']) elif id == -7 and re.match(r'La cuenta .+", "Session # resources cuentas: ClassVar = CuentaFisica ordenes: ClassVar =", "elif id == -11 and re.match(r'El tipo de cuenta \\d+", "id == -20 and re.match(r'El monto {.+} no es válido',", "CuentaFisica, Orden, Resource, Saldo from .version import __version__ as client_version", "0 and 'Cuenta en revisión' in desc: # STP regresa", "id == 1 and re.match(r'El campo \\w+ es invalido', desc):", "InvalidAmount, InvalidField, InvalidInstitution, InvalidPassphrase, InvalidRfcOrCurp, InvalidTrackingKey, MandatoryField, NoOrdenesEncontradas, NoServiceResponse, PldRejected,", "'https://prod.stpmex.com' class Client: base_url: str soap_url: str session: Session #", "Union[Dict[str, Any], List[Any]]: return self.request('delete', endpoint, data) def request( self,", "= base_url or 
# STP (stpmex) REST API client
import re
from typing import Any, ClassVar, Dict, List, NoReturn, Union

from cryptography.exceptions import UnsupportedAlgorithm
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from requests import Response, Session

from .exc import (
    AccountDoesNotExist,
    BankCodeClabeMismatch,
    ClaveRastreoAlreadyInUse,
    DuplicatedAccount,
    InvalidAccountType,
    InvalidAmount,
    InvalidField,
    InvalidInstitution,
    InvalidPassphrase,
    InvalidRfcOrCurp,
    InvalidTrackingKey,
    MandatoryField,
    NoOrdenesEncontradas,
    NoServiceResponse,
    PldRejected,
    SameAccount,
    SignatureValidationError,
    StpmexException,
)
from .resources import CuentaFisica, Orden, Resource, Saldo
from .version import __version__ as client_version

DEMO_HOST = 'https://demo.stpmex.com:7024'
PROD_HOST = 'https://prod.stpmex.com'


class Client:
    base_url: str
    soap_url: str
    session: Session

    # resources
    cuentas: ClassVar = CuentaFisica
    ordenes: ClassVar = Orden
    saldos: ClassVar = Saldo

    def __init__(
        self,
        empresa: str,
        priv_key: str,
        priv_key_passphrase: str,
        demo: bool = False,
        base_url: str = None,
        soap_url: str = None,
        timeout: tuple = None,
    ):
        self.timeout = timeout
        self.session = Session()
        self.session.headers['User-Agent'] = f'stpmex-python/{client_version}'
        if demo:
            host_url = DEMO_HOST
            self.session.verify = False
        else:
            host_url = PROD_HOST
            self.session.verify = True
        self.base_url = base_url or f'{host_url}/speiws/rest'
        self.soap_url = (
            soap_url or f'{host_url}/spei/webservices/SpeiConsultaServices'
        )
        try:
            self.pkey = serialization.load_pem_private_key(
                priv_key.encode('utf-8'),
                priv_key_passphrase.encode('ascii'),
                default_backend(),
            )
        except (ValueError, TypeError, UnsupportedAlgorithm):
            raise InvalidPassphrase
        Resource.empresa = empresa
        Resource._client = self

    def post(
        self, endpoint: str, data: Dict[str, Any]
    ) -> Union[Dict[str, Any], List[Any]]:
        return self.request('post', endpoint, data)

    def put(
        self, endpoint: str, data: Dict[str, Any]
    ) -> Union[Dict[str, Any], List[Any]]:
        return self.request('put', endpoint, data)

    def delete(
        self, endpoint: str, data: Dict[str, Any]
    ) -> Union[Dict[str, Any], List[Any]]:
        return self.request('delete', endpoint, data)

    def request(
        self, method: str, endpoint: str, data: Dict[str, Any], **kwargs: Any
    ) -> Union[Dict[str, Any], List[Any]]:
        url = self.base_url + endpoint
        response = self.session.request(
            method,
            url,
            json=data,
            timeout=self.timeout,
            **kwargs,
        )
        self._check_response(response)
        resultado = response.json()
        if 'resultado' in resultado:  # Some responses are enveloped
            resultado = resultado['resultado']
        return resultado

    @staticmethod
    def _check_response(response: Response) -> None:
        if not response.ok:
            response.raise_for_status()
        resp = response.json()
        if isinstance(resp, dict):
            try:
                _raise_description_error_exc(resp)
            except KeyError:
                ...
            try:
                assert resp['descripcion']
                _raise_description_exc(resp)
            except (AssertionError, KeyError):
                ...
        response.raise_for_status()


def _raise_description_error_exc(resp: Dict) -> NoReturn:
    id = resp['resultado']['id']
    error = resp['resultado']['descripcionError']
    if id == 0 and error == 'No se recibió respuesta del servicio':
        raise NoServiceResponse(**resp['resultado'])
    elif id == 0 and error == 'Error validando la firma':
        raise SignatureValidationError(**resp['resultado'])
    elif id == 0 and re.match(r'El campo .+ es obligatorio', error):
        raise MandatoryField(**resp['resultado'])
    elif id == -1 and re.match(
        r'La clave de rastreo .+ ya fue utilizada', error
    ):
        raise ClaveRastreoAlreadyInUse(**resp['resultado'])
    elif id == -7 and re.match(r'La cuenta .+ no existe', error):
        raise AccountDoesNotExist(**resp['resultado'])
    elif id == -9 and re.match(r'La Institucion \d+ no es valida', error):
        raise InvalidInstitution(**resp['resultado'])
    elif id == -11 and re.match(r'El tipo de cuenta \d+ es invalido', error):
        raise InvalidAccountType(**resp['resultado'])
    elif id == -20 and re.match(r'El monto {.+} no es válido', error):
        raise InvalidAmount(**resp['resultado'])
    elif id == -22 and 'no coincide para la institucion operante' in error:
        raise BankCodeClabeMismatch(**resp['resultado'])
    elif id == -24 and re.match(r'Cuenta {\d+} - {MISMA_CUENTA}', error):
        raise SameAccount(**resp['resultado'])
    elif id == -34 and 'Clave rastreo invalida' in error:
        raise InvalidTrackingKey(**resp['resultado'])
    elif id == -100 and error.startswith('No se encontr'):
        raise NoOrdenesEncontradas
    elif id == -200 and 'Se rechaza por PLD' in error:
        raise PldRejected(**resp['resultado'])
    else:
        raise StpmexException(**resp['resultado'])


def _raise_description_exc(resp: Dict) -> NoReturn:
    id = resp['id']
    desc = resp['descripcion']
    if id == 0 and 'Cuenta en revisión' in desc:
        # STP returns this response when an account is registered.
        # No exception is raised because every account goes
        # through this status.
        ...
    elif id == 1 and desc == 'rfc/curp invalido':
        raise InvalidRfcOrCurp(**resp)
    elif id == 1 and re.match(r'El campo \w+ es invalido', desc):
        raise InvalidField(**resp)
    elif id == 3 and desc == 'Cuenta Duplicada':
        raise DuplicatedAccount(**resp)
    elif id == 5 and re.match(r'El campo .* obligatorio \w+', desc):
        raise MandatoryField(**resp)
    else:
        raise StpmexException(**resp)
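# A minimal usage sketch for the Client above, assuming the demo host.
# The empresa name, key file, passphrase, endpoint path, and payload are
# hypothetical placeholders, not values taken from this module.
if __name__ == '__main__':
    with open('priv.pem') as f:          # hypothetical PEM key file
        priv_key = f.read()
    client = Client(
        empresa='MI_EMPRESA',            # hypothetical company name
        priv_key=priv_key,
        priv_key_passphrase='secret',    # hypothetical passphrase
        demo=True,
    )
    # request() unwraps enveloped {'resultado': ...} responses and raises a
    # typed exception (see _raise_description_error_exc) on known errors.
    resultado = client.post('/some/endpoint', {'campo': 'valor'})  # hypothetical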
[ "l, ['1','2','3','4','7','8'] ) l -= \"2, 3\" self.assertEqual( l, ['1','4','7','8']", ") l += [7, 8] self.assertEqual( l, [1,2,3,4,7,8] ) l", "[10,12] ) self.assertEqual( l, [10,12,1,4,7,8] ) l.extend( [0,-1] ) self.assertEqual(", "\"10,12\" ) self.assertEqual( l, ['10','12','1','4','7','8'] ) l.extend( \"0,-1\" ) self.assertEqual(", "ul.remove( 55 ); ul.selfTest() self.assertEqual( list(ul), [2,3,4]) self.assertEqual( ul.pop(), 4", "UniqueList, SplitListType, List, ValueListType #//===========================================================================// class TestListTypes( AqlTestCase ): def", "self.assertEqual( list(ul), [1,2,3,4,5]) ul -= [2,2,2,4,33]; ul.selfTest() self.assertEqual( list(ul), [1,3,5])", "\", \\t\\n\\r\" )(\"1,2, 3,,, \\n\\r\\t4\") self.assertEqual( l, [1,2,3,4] ) self.assertEqual(", "self.assertEqual( l, [1,2,3,4] ) l += [7, 8] self.assertEqual( l,", "); ul.selfTest() self.assertEqual( ul, [2,3,1]) self.assertEqual( list(ul), [1,2,3]) ul =", "-= 5 self.assertEqual( l, [1,4,7,8] ) l.extend_front( [10,12] ) self.assertEqual(", "3\" self.assertEqual( l, ['1','4','7','8'] ) l -= \"5\" self.assertEqual( l,", "); ul.selfTest() ul.append( 1 ); ul.selfTest() self.assertEqual( list(ul), [1,3,2]) ul.append_front(", "self.assertEqual( l, [10,12,1,4,7,8,0,-1] ) l[0] = \"5\" self.assertEqual( l, [5,12,1,4,7,8,0,-1]", "2 ); ul.selfTest() ul.append( 3 ); ul.selfTest() ul.append( 1 );", "3,,, \\n\\r\\t4\") self.assertEqual( l, ['1','2','3','4'] ) self.assertEqual( l, \"1,2,3,4\" )", "def test_valuelist(self): l = SplitListType( ValueListType( List, int ), \",", "ul.selfTest() ul.append( 1 ); ul.selfTest() ul.append( 2 ); ul.selfTest() ul.append(", "ul, [2,3,1]) self.assertEqual( list(ul), [1,2,3]) ul = UniqueList() ul.append( 1", "[5,3,1] ) ul.reverse(); ul.selfTest() self.assertEqual( str(ul), \"[1, 3, 5]\" )", "list(ul), [1,3,2]) ul.append_front( 2 ); ul.selfTest() self.assertEqual( list(ul), [2,1,3]) ul.extend(", ") self.assertEqual( str(l), \"1,2,3,4\" ) l += \"7, 8\" self.assertEqual(", "test_valuelist(self): l = SplitListType( ValueListType( List, int ), \", \\t\\n\\r\"", "ul.selfTest() self.assertEqual( list(ul), [2,3,4]) self.assertEqual( ul.pop(), 4 ); ul.selfTest() self.assertEqual(", "[1,2,1,1,1,4] ) #//===========================================================================// def test_splitlist(self): l = SplitListType( List, \",", "[4,1,2,2,5] ); ul.selfTest() self.assertEqual( list(ul), [2,1,3,4,5]) ul.extend_front( [1,2,2,3,1,1,5,5] ); ul.selfTest()", "import UniqueList, SplitListType, List, ValueListType #//===========================================================================// class TestListTypes( AqlTestCase ):", "[2, 3] self.assertEqual( l, [1,4,7,8] ) l -= 5 self.assertEqual(", "\"7, 8\" self.assertEqual( l, ['1','2','3','4','7','8'] ) l -= \"2, 3\"", "); ul.selfTest() self.assertEqual( list(ul), [2,3,4]) self.assertEqual( ul.pop(), 4 ); ul.selfTest()", "list(ul), [1,2,3,5,4]) self.assertEqual( list(ul), [1,2,3,5,4]) ul.remove( 1 ); ul.selfTest() self.assertEqual(", "l = SplitListType( List, \", \\t\\n\\r\" )(\"1,2, 3,,, \\n\\r\\t4\") self.assertEqual(", "[1,3,5]) self.assertEqual( ul[0], 1) self.assertEqual( ul[2], 5) self.assertEqual( ul[1], 3)", "#//===========================================================================// class TestListTypes( AqlTestCase ): def test_unique_list(self): ul = UniqueList(", "ul.selfTest() self.assertEqual( ul.pop_front(), 2 ); ul.selfTest() self.assertEqual( ul.pop_front(), 3 );", "l, [1,2,3,4] ) l += [7, 8] 
self.assertEqual( l, [1,2,3,4,7,8]", "self.assertEqual( list(ul), [2,3,4]) self.assertEqual( ul.pop(), 4 ); ul.selfTest() self.assertEqual( ul.pop_front(),", "78 self.assertEqual( l, ['1','2','3','4','7','8', 78] ) l -= 78 self.assertEqual(", "list(ul), [1,2,3]) ul = UniqueList() ul.append( 1 ); ul.selfTest() ul.append(", "[1,2,3,4,5]) ul -= [2,2,2,4,33]; ul.selfTest() self.assertEqual( list(ul), [1,3,5]) self.assertEqual( ul[0],", ") l -= \"5\" self.assertEqual( l, ['1','4','7','8'] ) l.extend_front( \"10,12\"", "self.assertEqual( list(ul), [2,1,3]) ul.extend( [4,1,2,2,5] ); ul.selfTest() self.assertEqual( list(ul), [2,1,3,4,5])", "self.assertEqual( l, ['1','4','7','8'] ) l -= \"5\" self.assertEqual( l, ['1','4','7','8']", "self.assertEqual( str(l), \"1,2,3,4\" ) l += \"7, 8\" self.assertEqual( l,", "); ul.selfTest() ul += [1,2,2,2,3,1,2,4,3,3,5,4,5,5]; ul.selfTest() self.assertEqual( list(ul), [1,2,3,4,5]) ul", "self.assertEqual( ul, [5,3,1] ) ul.reverse(); ul.selfTest() self.assertEqual( str(ul), \"[1, 3,", ") self.assertEqual( str(l), \"1,2,3,4\" ) l += [7, 8] self.assertEqual(", "ul.selfTest() self.assertEqual( list(ul), [2,3,4]) ul.remove( 55 ); ul.selfTest() self.assertEqual( list(ul),", "l, [10,12,1,4,7,8] ) l.extend( [0,-1] ) self.assertEqual( l, [10,12,1,4,7,8, 0,", "); ul.selfTest() self.assertEqual( list(ul), [2,1,3,4,5]) ul.extend_front( [1,2,2,3,1,1,5,5] ); ul.selfTest() self.assertEqual(", "2 3 4\" ) self.assertEqual( str(l), \"1,2,3,4\" ) l +=", "aql_tests import skip, AqlTestCase, runLocalTests from aql.util_types import UniqueList, SplitListType,", "l -= 78 self.assertEqual( l, ['1','2','3','4','7','8'] ) l -= \"2,", "+= [7, 8] self.assertEqual( l, ['1','2','3','4','7','8'] ) l += 78", "list(ul), [1,3,5]) self.assertEqual( ul[0], 1) self.assertEqual( ul[2], 5) self.assertEqual( ul[1],", "self.assertEqual( l, [10,12,1,4,7,8] ) l.extend( [0,-1] ) self.assertEqual( l, [10,12,1,4,7,8,", "\"0,-1\" ) self.assertEqual( l, ['10','12','1','4','7','8', '0', '-1'] ) #//===========================================================================// def", "import skip, AqlTestCase, runLocalTests from aql.util_types import UniqueList, SplitListType, List,", "); ul.selfTest() ul.append( 1 ); ul.selfTest() ul.append( 2 ); ul.selfTest()", "ul.append( 1 ); ul.selfTest() ul.append( 2 ); ul.selfTest() ul.append( 3", "self.assertEqual( l, ['1','2','3','4','7','8', 78] ) l -= 78 self.assertEqual( l,", "+= [1,2,2,2,3,1,2,4,3,3,5,4,5,5]; ul.selfTest() self.assertEqual( list(ul), [1,2,3,4,5]) ul -= [2,2,2,4,33]; ul.selfTest()", "ul += [1,2,2,2,3,1,2,4,3,3,5,4,5,5]; ul.selfTest() self.assertEqual( list(ul), [1,2,3,4,5]) ul -= [2,2,2,4,33];", "3,,, \\n\\r\\t4\") self.assertEqual( l, [1,2,3,4] ) self.assertEqual( l, \"1,2,3,4\" )", "UniqueList(ul) ) self.assertLess( UniqueList([1,2,2,2,3]), UniqueList([1,2,1,1,1,4]) ) self.assertLess( UniqueList([1,2,2,2,3]), [1,2,1,1,1,4] )", "l, ['1','4','7','8'] ) l -= \"5\" self.assertEqual( l, ['1','4','7','8'] )", "); ul.selfTest() self.assertEqual( list(ul), [2,3,4]) ul.remove( 55 ); ul.selfTest() self.assertEqual(", "list(ul), [2,3,4]) self.assertEqual( ul.pop(), 4 ); ul.selfTest() self.assertEqual( ul.pop_front(), 2", "\"1,2,3,4\" ) self.assertEqual( l, \"1 2 3 4\" ) self.assertEqual(", "sys import os.path import timeit sys.path.insert( 0, os.path.normpath(os.path.join( os.path.dirname( __file__", "ul.append_front( 2 ); ul.selfTest() self.assertEqual( list(ul), [2,1,3]) ul.extend( [4,1,2,2,5] );", "self.assertEqual( list(ul), [1,2,3]) ul = UniqueList() 
ul.append( 1 ); ul.selfTest()", "[1,2,2,2,3,1,2,4,3,3,5,4,5,5]; ul.selfTest() self.assertEqual( list(ul), [1,2,3,4,5]) ul -= [2,2,2,4,33]; ul.selfTest() self.assertEqual(", "l, \"1,2,3,4\" ) self.assertEqual( l, \"1 2 3 4\" )", "self.assertEqual( l, \"1 2 3 4\" ) self.assertEqual( str(l), \"1,2,3,4\"", "4 ); ul.selfTest() self.assertEqual( ul.pop_front(), 2 ); ul.selfTest() self.assertEqual( ul.pop_front(),", "l = List([1,2,3,4]) self.assertEqual( l, [1,2,3,4] ) l += [7,", "[5,3,1]) ul.reverse(); ul.selfTest() self.assertEqual( ul, [5,3,1] ) ul.reverse(); ul.selfTest() self.assertEqual(", "5 ); ul.selfTest() self.assertEqual( list(ul), [2,3,4]) ul.remove( 55 ); ul.selfTest()", "List([1,2,3,4]) self.assertEqual( l, [1,2,3,4] ) l += [7, 8] self.assertEqual(", ") self.assertEqual( l, ['10','12','1','4','7','8', '0', '-1'] ) #//===========================================================================// def test_valuelist(self):", "4\" ) self.assertEqual( str(l), \"1,2,3,4\" ) l += [7, 8]", "= SplitListType( ValueListType( List, int ), \", \\t\\n\\r\" )(\"1,2, 3,,,", ") self.assertEqual( l, \"1 2 3 4\" ) self.assertEqual( str(l),", "self.assertEqual( l, [5,12,1,4,7,8,0,-1] ) #//===========================================================================// def test_list(self): l = List([1,2,3,4])", "l, [1,2,3,4,7,8] ) l -= [2, 3] self.assertEqual( l, [1,4,7,8]", "[2,2,2,4,33]; ul.selfTest() self.assertEqual( list(ul), [1,3,5]) self.assertEqual( ul[0], 1) self.assertEqual( ul[2],", "self.assertLess( UniqueList([1,2,2,2,3]), UniqueList([1,2,1,1,1,4]) ) self.assertLess( UniqueList([1,2,2,2,3]), [1,2,1,1,1,4] ) #//===========================================================================// def", "5]) ) self.assertEqual( ul, UniqueList(ul) ) self.assertLess( UniqueList([1,2,2,2,3]), UniqueList([1,2,1,1,1,4]) )", "ul.selfTest() self.assertEqual( list(ul), [1,2,3,4,5]) ul -= [2,2,2,4,33]; ul.selfTest() self.assertEqual( list(ul),", "self.assertEqual( list(reversed(ul)), [5,3,1]) ul.reverse(); ul.selfTest() self.assertEqual( ul, [5,3,1] ) ul.reverse();", "self.assertEqual( ul[1], 3) self.assertIn( 1, ul) self.assertEqual( list(reversed(ul)), [5,3,1]) ul.reverse();", "skip, AqlTestCase, runLocalTests from aql.util_types import UniqueList, SplitListType, List, ValueListType", "self.assertEqual( l, [1,4,7,8] ) l.extend_front( [10,12] ) self.assertEqual( l, [10,12,1,4,7,8]", "3) self.assertIn( 1, ul) self.assertEqual( list(reversed(ul)), [5,3,1]) ul.reverse(); ul.selfTest() self.assertEqual(", "ul.reverse(); ul.selfTest() self.assertEqual( ul, [5,3,1] ) ul.reverse(); ul.selfTest() self.assertEqual( str(ul),", "l.extend( \"0,-1\" ) self.assertEqual( l, ['10','12','1','4','7','8', '0', '-1'] ) #//===========================================================================//", "3] self.assertEqual( l, [1,4,7,8] ) l -= 5 self.assertEqual( l,", "str(l), \"1,2,3,4\" ) l += [7, 8] self.assertEqual( l, ['1','2','3','4','7','8']", "78 self.assertEqual( l, ['1','2','3','4','7','8'] ) l -= \"2, 3\" self.assertEqual(", "ul.selfTest() ul.append( 1 ); ul.selfTest() self.assertEqual( list(ul), [1,3,2]) ul.append_front( 2", "-= [2, 3] self.assertEqual( l, [1,4,7,8] ) l -= 5", "3 4\" ) self.assertEqual( str(l), \"1,2,3,4\" ) l += [7,", "['10','12','1','4','7','8'] ) l.extend( \"0,-1\" ) self.assertEqual( l, ['10','12','1','4','7','8', '0', '-1']", "timeit sys.path.insert( 0, os.path.normpath(os.path.join( os.path.dirname( __file__ ), '..') )) from", "def test_splitlist(self): l = SplitListType( List, \", \\t\\n\\r\" 
)(\"1,2, 3,,,", ") #//===========================================================================// def test_list(self): l = List([1,2,3,4]) self.assertEqual( l, [1,2,3,4]", "ul -= [2,2,2,4,33]; ul.selfTest() self.assertEqual( list(ul), [1,3,5]) self.assertEqual( ul[0], 1)", "[2,1,3,4,5]) ul.extend_front( [1,2,2,3,1,1,5,5] ); ul.selfTest() self.assertEqual( list(ul), [1,2,3,5,4]) self.assertEqual( list(ul),", "ul.selfTest() self.assertEqual( list(ul), [2,1,3]) ul.extend( [4,1,2,2,5] ); ul.selfTest() self.assertEqual( list(ul),", "ul.selfTest() self.assertEqual( ul.pop_front(), 3 ); ul.selfTest() ul += [1,2,2,2,3,1,2,4,3,3,5,4,5,5]; ul.selfTest()", "self.assertEqual( l, ['1','2','3','4'] ) self.assertEqual( l, \"1,2,3,4\" ) self.assertEqual( l,", "[10,12,1,4,7,8, 0, -1] ) #//===========================================================================// if __name__ == \"__main__\": runLocalTests()", "-= 78 self.assertEqual( l, [1,2,3,4,7,8] ) l -= [2, 3]", "runLocalTests from aql.util_types import UniqueList, SplitListType, List, ValueListType #//===========================================================================// class", "ul.selfTest() ul += [1,2,2,2,3,1,2,4,3,3,5,4,5,5]; ul.selfTest() self.assertEqual( list(ul), [1,2,3,4,5]) ul -=", "ul.reverse(); ul.selfTest() self.assertEqual( str(ul), \"[1, 3, 5]\" ) self.assertEqual( ul,", "ul.selfTest() self.assertEqual( list(ul), [1,2,3,5,4]) self.assertEqual( list(ul), [1,2,3,5,4]) ul.remove( 1 );", "[1,2,3,5,4]) self.assertEqual( list(ul), [1,2,3,5,4]) ul.remove( 1 ); ul.selfTest() self.assertEqual( list(ul),", "self.assertEqual( l, ['10','12','1','4','7','8'] ) l.extend( \"0,-1\" ) self.assertEqual( l, [10,12,1,4,7,8,0,-1]", "ul.selfTest() self.assertEqual( str(ul), \"[1, 3, 5]\" ) self.assertEqual( ul, UniqueList([1,", "[7, 8] self.assertEqual( l, ['1','2','3','4','7','8'] ) l += 78 self.assertEqual(", ") l += 78 self.assertEqual( l, ['1','2','3','4','7','8', 78] ) l", "+= [7, 8] self.assertEqual( l, [1,2,3,4,7,8] ) l += 78", "); ul.selfTest() ul.append( 2 ); ul.selfTest() ul.append( 3 ); ul.selfTest()", "['10','12','1','4','7','8', '0', '-1'] ) #//===========================================================================// def test_valuelist(self): l = SplitListType(", "ul = UniqueList( [1,2,3,2,1,3] ); ul.selfTest() self.assertEqual( ul, [2,3,1]) self.assertEqual(", "ul[2], 5) self.assertEqual( ul[1], 3) self.assertIn( 1, ul) self.assertEqual( list(reversed(ul)),", "self.assertEqual( str(l), \"1,2,3,4\" ) l += [7, 8] self.assertEqual( l,", "\"1,2,3,4\" ) l += [7, 8] self.assertEqual( l, ['1','2','3','4','7','8'] )", ") l.extend( [0,-1] ) self.assertEqual( l, [10,12,1,4,7,8, 0, -1] )", "ul.selfTest() self.assertEqual( list(ul), [1,3,2]) ul.append_front( 2 ); ul.selfTest() self.assertEqual( list(ul),", "[1,4,7,8] ) l -= 5 self.assertEqual( l, [1,4,7,8] ) l.extend_front(", "5]\" ) self.assertEqual( ul, UniqueList([1, 3, 5]) ) self.assertEqual( ul,", "l, [10,12,1,4,7,8, 0, -1] ) #//===========================================================================// if __name__ == \"__main__\":", ")) from aql_tests import skip, AqlTestCase, runLocalTests from aql.util_types import", ") self.assertLess( UniqueList([1,2,2,2,3]), [1,2,1,1,1,4] ) #//===========================================================================// def test_splitlist(self): l =", "= List([1,2,3,4]) self.assertEqual( l, [1,2,3,4] ) l += [7, 8]", "ul, UniqueList(ul) ) self.assertLess( UniqueList([1,2,2,2,3]), UniqueList([1,2,1,1,1,4]) ) self.assertLess( 
UniqueList([1,2,2,2,3]), [1,2,1,1,1,4]", "[1,4,7,8] ) l.extend_front( [10,12] ) self.assertEqual( l, [10,12,1,4,7,8] ) l.extend(", "), \", \\t\\n\\r\" )(\"1,2, 3,,, \\n\\r\\t4\") self.assertEqual( l, [1,2,3,4] )", "); ul.selfTest() self.assertEqual( ul.pop_front(), 2 ); ul.selfTest() self.assertEqual( ul.pop_front(), 3", "l.extend_front( \"10,12\" ) self.assertEqual( l, ['10','12','1','4','7','8'] ) l.extend( \"0,-1\" )", "8] self.assertEqual( l, ['1','2','3','4','7','8'] ) l += 78 self.assertEqual( l,", "l += [7, 8] self.assertEqual( l, [1,2,3,4,7,8] ) l +=", "[1,2,3,5,4]) ul.remove( 1 ); ul.selfTest() self.assertEqual( list(ul), [2,3,5,4]) ul.remove( 5", "78] ) l -= 78 self.assertEqual( l, ['1','2','3','4','7','8'] ) l", ") l -= 5 self.assertEqual( l, [1,4,7,8] ) l.extend_front( [10,12]", "List, ValueListType #//===========================================================================// class TestListTypes( AqlTestCase ): def test_unique_list(self): ul", "0, os.path.normpath(os.path.join( os.path.dirname( __file__ ), '..') )) from aql_tests import", "ul, [5,3,1] ) ul.reverse(); ul.selfTest() self.assertEqual( str(ul), \"[1, 3, 5]\"", ")(\"1,2, 3,,, \\n\\r\\t4\") self.assertEqual( l, ['1','2','3','4'] ) self.assertEqual( l, \"1,2,3,4\"", "l += \"7, 8\" self.assertEqual( l, ['1','2','3','4','7','8'] ) l -=", "ul.remove( 1 ); ul.selfTest() self.assertEqual( list(ul), [2,3,5,4]) ul.remove( 5 );", "from aql.util_types import UniqueList, SplitListType, List, ValueListType #//===========================================================================// class TestListTypes(", ") l -= 78 self.assertEqual( l, ['1','2','3','4','7','8'] ) l -=", "l, \"1 2 3 4\" ) self.assertEqual( str(l), \"1,2,3,4\" )", ") l += 78 self.assertEqual( l, [1,2,3,4,7,8,78] ) l -=", "UniqueList([1,2,2,2,3]), UniqueList([1,2,1,1,1,4]) ) self.assertLess( UniqueList([1,2,2,2,3]), [1,2,1,1,1,4] ) #//===========================================================================// def test_splitlist(self):", "), '..') )) from aql_tests import skip, AqlTestCase, runLocalTests from", "ul.selfTest() self.assertEqual( list(ul), [1,3,5]) self.assertEqual( ul[0], 1) self.assertEqual( ul[2], 5)", "str(ul), \"[1, 3, 5]\" ) self.assertEqual( ul, UniqueList([1, 3, 5])", "['1','2','3','4'] ) self.assertEqual( l, \"1,2,3,4\" ) self.assertEqual( l, \"1 2", "\\t\\n\\r\" )(\"1,2, 3,,, \\n\\r\\t4\") self.assertEqual( l, ['1','2','3','4'] ) self.assertEqual( l,", "3 4\" ) self.assertEqual( str(l), \"1,2,3,4\" ) l += \"7,", "[10,12,1,4,7,8,0,-1] ) l[0] = \"5\" self.assertEqual( l, [5,12,1,4,7,8,0,-1] ) #//===========================================================================//", "self.assertEqual( l, ['1','2','3','4','7','8'] ) l += 78 self.assertEqual( l, ['1','2','3','4','7','8',", "AqlTestCase ): def test_unique_list(self): ul = UniqueList( [1,2,3,2,1,3] ); ul.selfTest()", ") l -= [2, 3] self.assertEqual( l, [1,4,7,8] ) l", "\"5\" self.assertEqual( l, [5,12,1,4,7,8,0,-1] ) #//===========================================================================// def test_list(self): l =", "import sys import os.path import timeit sys.path.insert( 0, os.path.normpath(os.path.join( os.path.dirname(", "['1','2','3','4','7','8', 78] ) l -= 78 self.assertEqual( l, ['1','2','3','4','7','8'] )", "[1,2,3,4,7,8] ) l -= [2, 3] self.assertEqual( l, [1,4,7,8] )", "+= 78 self.assertEqual( l, ['1','2','3','4','7','8', 78] ) l -= 78", ") self.assertEqual( l, ['10','12','1','4','7','8'] ) l.extend( \"0,-1\" ) self.assertEqual( l,", "ul.append( 2 ); ul.selfTest() 
ul.append( 3 ); ul.selfTest() ul.append( 1", "l -= 78 self.assertEqual( l, [1,2,3,4,7,8] ) l -= [2,", "1 ); ul.selfTest() self.assertEqual( list(ul), [2,3,5,4]) ul.remove( 5 ); ul.selfTest()", "\\n\\r\\t4\") self.assertEqual( l, ['1','2','3','4'] ) self.assertEqual( l, \"1,2,3,4\" ) self.assertEqual(", "ul.append( 3 ); ul.selfTest() ul.append( 1 ); ul.selfTest() ul.append( 2", "l.extend_front( [10,12] ) self.assertEqual( l, [10,12,1,4,7,8] ) l.extend( [0,-1] )", ") l.extend( \"0,-1\" ) self.assertEqual( l, [10,12,1,4,7,8,0,-1] ) l[0] =", "); ul.selfTest() self.assertEqual( ul.pop_front(), 3 ); ul.selfTest() ul += [1,2,2,2,3,1,2,4,3,3,5,4,5,5];", "self.assertEqual( l, [1,2,3,4] ) self.assertEqual( l, \"1,2,3,4\" ) self.assertEqual( l,", "[1,2,2,3,1,1,5,5] ); ul.selfTest() self.assertEqual( list(ul), [1,2,3,5,4]) self.assertEqual( list(ul), [1,2,3,5,4]) ul.remove(", "1 ); ul.selfTest() ul.append( 3 ); ul.selfTest() ul.append( 1 );", "os.path.dirname( __file__ ), '..') )) from aql_tests import skip, AqlTestCase,", "); ul.selfTest() self.assertEqual( list(ul), [1,2,3,5,4]) self.assertEqual( list(ul), [1,2,3,5,4]) ul.remove( 1", "self.assertEqual( ul.pop_front(), 2 ); ul.selfTest() self.assertEqual( ul.pop_front(), 3 ); ul.selfTest()", "int ), \", \\t\\n\\r\" )(\"1,2, 3,,, \\n\\r\\t4\") self.assertEqual( l, [1,2,3,4]", "l, [1,4,7,8] ) l.extend_front( [10,12] ) self.assertEqual( l, [10,12,1,4,7,8] )", "[2,3,1]) self.assertEqual( list(ul), [1,2,3]) ul = UniqueList() ul.append( 1 );", "self.assertEqual( list(ul), [1,2,3,5,4]) self.assertEqual( list(ul), [1,2,3,5,4]) ul.remove( 1 ); ul.selfTest()", "['1','2','3','4','7','8'] ) l += 78 self.assertEqual( l, ['1','2','3','4','7','8', 78] )", "SplitListType, List, ValueListType #//===========================================================================// class TestListTypes( AqlTestCase ): def test_unique_list(self):", "ul.pop(), 4 ); ul.selfTest() self.assertEqual( ul.pop_front(), 2 ); ul.selfTest() self.assertEqual(", "self.assertEqual( ul.pop_front(), 3 ); ul.selfTest() ul += [1,2,2,2,3,1,2,4,3,3,5,4,5,5]; ul.selfTest() self.assertEqual(", "l, ['10','12','1','4','7','8'] ) l.extend( \"0,-1\" ) self.assertEqual( l, [10,12,1,4,7,8,0,-1] )", "['1','4','7','8'] ) l -= \"5\" self.assertEqual( l, ['1','4','7','8'] ) l.extend_front(", "list(ul), [2,1,3]) ul.extend( [4,1,2,2,5] ); ul.selfTest() self.assertEqual( list(ul), [2,1,3,4,5]) ul.extend_front(", "[1,2,3,4,7,8] ) l += 78 self.assertEqual( l, [1,2,3,4,7,8,78] ) l", "\\n\\r\\t4\") self.assertEqual( l, [1,2,3,4] ) self.assertEqual( l, \"1,2,3,4\" ) self.assertEqual(", "l.extend( [0,-1] ) self.assertEqual( l, [10,12,1,4,7,8, 0, -1] ) #//===========================================================================//", "[1,3,2]) ul.append_front( 2 ); ul.selfTest() self.assertEqual( list(ul), [2,1,3]) ul.extend( [4,1,2,2,5]", "UniqueList([1,2,1,1,1,4]) ) self.assertLess( UniqueList([1,2,2,2,3]), [1,2,1,1,1,4] ) #//===========================================================================// def test_splitlist(self): l", "self.assertEqual( list(ul), [2,3,4]) ul.remove( 55 ); ul.selfTest() self.assertEqual( list(ul), [2,3,4])", "+= \"7, 8\" self.assertEqual( l, ['1','2','3','4','7','8'] ) l -= \"2,", "os.path import timeit sys.path.insert( 0, os.path.normpath(os.path.join( os.path.dirname( __file__ ), '..')", "ul.selfTest() ul.append( 2 ); ul.selfTest() ul.append( 3 ); ul.selfTest() ul.append(", "AqlTestCase, runLocalTests from aql.util_types import UniqueList, SplitListType, List, ValueListType 
#//===========================================================================//", "\", \\t\\n\\r\" )(\"1,2, 3,,, \\n\\r\\t4\") self.assertEqual( l, ['1','2','3','4'] ) self.assertEqual(", "list(reversed(ul)), [5,3,1]) ul.reverse(); ul.selfTest() self.assertEqual( ul, [5,3,1] ) ul.reverse(); ul.selfTest()", "1 ); ul.selfTest() self.assertEqual( list(ul), [1,3,2]) ul.append_front( 2 ); ul.selfTest()", "-= [2,2,2,4,33]; ul.selfTest() self.assertEqual( list(ul), [1,3,5]) self.assertEqual( ul[0], 1) self.assertEqual(", "[5,12,1,4,7,8,0,-1] ) #//===========================================================================// def test_list(self): l = List([1,2,3,4]) self.assertEqual( l,", "\\t\\n\\r\" )(\"1,2, 3,,, \\n\\r\\t4\") self.assertEqual( l, [1,2,3,4] ) self.assertEqual( l,", "= UniqueList() ul.append( 1 ); ul.selfTest() ul.append( 3 ); ul.selfTest()", ") l.extend( \"0,-1\" ) self.assertEqual( l, ['10','12','1','4','7','8', '0', '-1'] )", "str(l), \"1,2,3,4\" ) l += \"7, 8\" self.assertEqual( l, ['1','2','3','4','7','8']", "'..') )) from aql_tests import skip, AqlTestCase, runLocalTests from aql.util_types", "from aql_tests import skip, AqlTestCase, runLocalTests from aql.util_types import UniqueList,", "self.assertEqual( l, \"1,2,3,4\" ) self.assertEqual( l, \"1 2 3 4\"", "self.assertEqual( l, ['1','4','7','8'] ) l.extend_front( \"10,12\" ) self.assertEqual( l, ['10','12','1','4','7','8']", "[7, 8] self.assertEqual( l, [1,2,3,4,7,8] ) l += 78 self.assertEqual(", "4\" ) self.assertEqual( str(l), \"1,2,3,4\" ) l += \"7, 8\"", "ul.append( 1 ); ul.selfTest() self.assertEqual( list(ul), [1,3,2]) ul.append_front( 2 );", "ul[1], 3) self.assertIn( 1, ul) self.assertEqual( list(reversed(ul)), [5,3,1]) ul.reverse(); ul.selfTest()", ") self.assertEqual( l, \"1,2,3,4\" ) self.assertEqual( l, \"1 2 3", "= \"5\" self.assertEqual( l, [5,12,1,4,7,8,0,-1] ) #//===========================================================================// def test_list(self): l", ") self.assertEqual( l, [10,12,1,4,7,8] ) l.extend( [0,-1] ) self.assertEqual( l,", "List, int ), \", \\t\\n\\r\" )(\"1,2, 3,,, \\n\\r\\t4\") self.assertEqual( l,", "ul[0], 1) self.assertEqual( ul[2], 5) self.assertEqual( ul[1], 3) self.assertIn( 1,", "+= 78 self.assertEqual( l, [1,2,3,4,7,8,78] ) l -= 78 self.assertEqual(", "self.assertEqual( ul, [2,3,1]) self.assertEqual( list(ul), [1,2,3]) ul = UniqueList() ul.append(", "self.assertEqual( l, [1,2,3,4,7,8] ) l += 78 self.assertEqual( l, [1,2,3,4,7,8,78]", "ul.append( 3 ); ul.selfTest() ul.append( 1 ); ul.selfTest() self.assertEqual( list(ul),", ")(\"1,2, 3,,, \\n\\r\\t4\") self.assertEqual( l, [1,2,3,4] ) self.assertEqual( l, \"1,2,3,4\"", "\"[1, 3, 5]\" ) self.assertEqual( ul, UniqueList([1, 3, 5]) )", "l, ['10','12','1','4','7','8', '0', '-1'] ) #//===========================================================================// def test_valuelist(self): l =", "test_splitlist(self): l = SplitListType( List, \", \\t\\n\\r\" )(\"1,2, 3,,, \\n\\r\\t4\")", "ul.selfTest() self.assertEqual( ul, [5,3,1] ) ul.reverse(); ul.selfTest() self.assertEqual( str(ul), \"[1,", "ul.pop_front(), 2 ); ul.selfTest() self.assertEqual( ul.pop_front(), 3 ); ul.selfTest() ul", "l += 78 self.assertEqual( l, ['1','2','3','4','7','8', 78] ) l -=", ") l.extend_front( [10,12] ) self.assertEqual( l, [10,12,1,4,7,8] ) l.extend( [0,-1]", "['1','4','7','8'] ) l.extend_front( \"10,12\" ) self.assertEqual( l, ['10','12','1','4','7','8'] ) l.extend(", ") self.assertLess( UniqueList([1,2,2,2,3]), UniqueList([1,2,1,1,1,4]) ) 
self.assertLess( UniqueList([1,2,2,2,3]), [1,2,1,1,1,4] ) #//===========================================================================//", "l += 78 self.assertEqual( l, [1,2,3,4,7,8,78] ) l -= 78", "[2,3,4]) self.assertEqual( ul.pop(), 4 ); ul.selfTest() self.assertEqual( ul.pop_front(), 2 );", "ul = UniqueList() ul.append( 1 ); ul.selfTest() ul.append( 3 );", "def test_list(self): l = List([1,2,3,4]) self.assertEqual( l, [1,2,3,4] ) l", "['10','12','1','4','7','8'] ) l.extend( \"0,-1\" ) self.assertEqual( l, [10,12,1,4,7,8,0,-1] ) l[0]", "l.extend( \"0,-1\" ) self.assertEqual( l, [10,12,1,4,7,8,0,-1] ) l[0] = \"5\"", "UniqueList() ul.append( 1 ); ul.selfTest() ul.append( 3 ); ul.selfTest() ul.append(", "self.assertEqual( ul.pop(), 4 ); ul.selfTest() self.assertEqual( ul.pop_front(), 2 ); ul.selfTest()", "l -= [2, 3] self.assertEqual( l, [1,4,7,8] ) l -=", "list(ul), [1,2,3,4,5]) ul -= [2,2,2,4,33]; ul.selfTest() self.assertEqual( list(ul), [1,3,5]) self.assertEqual(", ") l[0] = \"5\" self.assertEqual( l, [5,12,1,4,7,8,0,-1] ) #//===========================================================================// def", "l, [1,2,3,4,7,8] ) l += 78 self.assertEqual( l, [1,2,3,4,7,8,78] )", ") self.assertEqual( l, [10,12,1,4,7,8,0,-1] ) l[0] = \"5\" self.assertEqual( l,", "self.assertEqual( ul[2], 5) self.assertEqual( ul[1], 3) self.assertIn( 1, ul) self.assertEqual(", "\"2, 3\" self.assertEqual( l, ['1','4','7','8'] ) l -= \"5\" self.assertEqual(", "l, ['1','2','3','4','7','8'] ) l += 78 self.assertEqual( l, ['1','2','3','4','7','8', 78]", "self.assertEqual( l, [10,12,1,4,7,8, 0, -1] ) #//===========================================================================// if __name__ ==", "3 ); ul.selfTest() ul.append( 1 ); ul.selfTest() ul.append( 2 );", "ValueListType #//===========================================================================// class TestListTypes( AqlTestCase ): def test_unique_list(self): ul =", "self.assertEqual( list(ul), [1,2,3,5,4]) ul.remove( 1 ); ul.selfTest() self.assertEqual( list(ul), [2,3,5,4])", "TestListTypes( AqlTestCase ): def test_unique_list(self): ul = UniqueList( [1,2,3,2,1,3] );", "-= \"5\" self.assertEqual( l, ['1','4','7','8'] ) l.extend_front( \"10,12\" ) self.assertEqual(", "l, ['10','12','1','4','7','8'] ) l.extend( \"0,-1\" ) self.assertEqual( l, ['10','12','1','4','7','8', '0',", "-= 78 self.assertEqual( l, ['1','2','3','4','7','8'] ) l -= \"2, 3\"", "self.assertEqual( list(ul), [1,3,5]) self.assertEqual( ul[0], 1) self.assertEqual( ul[2], 5) self.assertEqual(", "\"1 2 3 4\" ) self.assertEqual( str(l), \"1,2,3,4\" ) l", "l, ['1','2','3','4','7','8', 78] ) l -= 78 self.assertEqual( l, ['1','2','3','4','7','8']", "l, [1,2,3,4,7,8,78] ) l -= 78 self.assertEqual( l, [1,2,3,4,7,8] )", "ul.remove( 5 ); ul.selfTest() self.assertEqual( list(ul), [2,3,4]) ul.remove( 55 );", "2 ); ul.selfTest() self.assertEqual( ul.pop_front(), 3 ); ul.selfTest() ul +=", "import os.path import timeit sys.path.insert( 0, os.path.normpath(os.path.join( os.path.dirname( __file__ ),", ") l += \"7, 8\" self.assertEqual( l, ['1','2','3','4','7','8'] ) l", "ul.append( 1 ); ul.selfTest() ul.append( 3 ); ul.selfTest() ul.append( 1", "sys.path.insert( 0, os.path.normpath(os.path.join( os.path.dirname( __file__ ), '..') )) from aql_tests", "self.assertEqual( l, ['1','2','3','4','7','8'] ) l -= \"2, 3\" self.assertEqual( l,", "self.assertLess( UniqueList([1,2,2,2,3]), [1,2,1,1,1,4] ) #//===========================================================================// def test_splitlist(self): l = 
SplitListType(", "#//===========================================================================// def test_list(self): l = List([1,2,3,4]) self.assertEqual( l, [1,2,3,4] )", "self.assertIn( 1, ul) self.assertEqual( list(reversed(ul)), [5,3,1]) ul.reverse(); ul.selfTest() self.assertEqual( ul,", "8\" self.assertEqual( l, ['1','2','3','4','7','8'] ) l -= \"2, 3\" self.assertEqual(", "UniqueList([1,2,2,2,3]), [1,2,1,1,1,4] ) #//===========================================================================// def test_splitlist(self): l = SplitListType( List,", "SplitListType( ValueListType( List, int ), \", \\t\\n\\r\" )(\"1,2, 3,,, \\n\\r\\t4\")", "); ul.selfTest() self.assertEqual( list(ul), [2,1,3]) ul.extend( [4,1,2,2,5] ); ul.selfTest() self.assertEqual(", "l, [5,12,1,4,7,8,0,-1] ) #//===========================================================================// def test_list(self): l = List([1,2,3,4]) self.assertEqual(", "ul, UniqueList([1, 3, 5]) ) self.assertEqual( ul, UniqueList(ul) ) self.assertLess(", "self.assertEqual( l, [1,2,3,4,7,8,78] ) l -= 78 self.assertEqual( l, [1,2,3,4,7,8]", "[2,3,4]) ul.remove( 55 ); ul.selfTest() self.assertEqual( list(ul), [2,3,4]) self.assertEqual( ul.pop(),", "self.assertEqual( list(ul), [2,1,3,4,5]) ul.extend_front( [1,2,2,3,1,1,5,5] ); ul.selfTest() self.assertEqual( list(ul), [1,2,3,5,4])", "UniqueList( [1,2,3,2,1,3] ); ul.selfTest() self.assertEqual( ul, [2,3,1]) self.assertEqual( list(ul), [1,2,3])", "ul.selfTest() self.assertEqual( ul, [2,3,1]) self.assertEqual( list(ul), [1,2,3]) ul = UniqueList()", "SplitListType( List, \", \\t\\n\\r\" )(\"1,2, 3,,, \\n\\r\\t4\") self.assertEqual( l, ['1','2','3','4']", "ul.selfTest() ul.append( 3 ); ul.selfTest() ul.append( 1 ); ul.selfTest() self.assertEqual(", "list(ul), [2,3,5,4]) ul.remove( 5 ); ul.selfTest() self.assertEqual( list(ul), [2,3,4]) ul.remove(", "[2,1,3]) ul.extend( [4,1,2,2,5] ); ul.selfTest() self.assertEqual( list(ul), [2,1,3,4,5]) ul.extend_front( [1,2,2,3,1,1,5,5]", "= SplitListType( List, \", \\t\\n\\r\" )(\"1,2, 3,,, \\n\\r\\t4\") self.assertEqual( l,", "self.assertEqual( str(ul), \"[1, 3, 5]\" ) self.assertEqual( ul, UniqueList([1, 3,", "list(ul), [1,2,3,5,4]) ul.remove( 1 ); ul.selfTest() self.assertEqual( list(ul), [2,3,5,4]) ul.remove(", "self.assertEqual( list(ul), [2,3,5,4]) ul.remove( 5 ); ul.selfTest() self.assertEqual( list(ul), [2,3,4])", ") l += [7, 8] self.assertEqual( l, ['1','2','3','4','7','8'] ) l", "1) self.assertEqual( ul[2], 5) self.assertEqual( ul[1], 3) self.assertIn( 1, ul)", ") l.extend_front( \"10,12\" ) self.assertEqual( l, ['10','12','1','4','7','8'] ) l.extend( \"0,-1\"", "l, [1,4,7,8] ) l -= 5 self.assertEqual( l, [1,4,7,8] )", "[1,2,3,2,1,3] ); ul.selfTest() self.assertEqual( ul, [2,3,1]) self.assertEqual( list(ul), [1,2,3]) ul", "l = SplitListType( ValueListType( List, int ), \", \\t\\n\\r\" )(\"1,2,", "= UniqueList( [1,2,3,2,1,3] ); ul.selfTest() self.assertEqual( ul, [2,3,1]) self.assertEqual( list(ul),", ") #//===========================================================================// def test_splitlist(self): l = SplitListType( List, \", \\t\\n\\r\"", "[1,2,3,4] ) l += [7, 8] self.assertEqual( l, [1,2,3,4,7,8] )", "class TestListTypes( AqlTestCase ): def test_unique_list(self): ul = UniqueList( [1,2,3,2,1,3]", "ul.pop_front(), 3 ); ul.selfTest() ul += [1,2,2,2,3,1,2,4,3,3,5,4,5,5]; ul.selfTest() self.assertEqual( list(ul),", "1, ul) self.assertEqual( list(reversed(ul)), [5,3,1]) ul.reverse(); ul.selfTest() self.assertEqual( ul, [5,3,1]", "'0', '-1'] ) 
#//===========================================================================// def test_valuelist(self): l = SplitListType( ValueListType(", "[1,2,3]) ul = UniqueList() ul.append( 1 ); ul.selfTest() ul.append( 3", "ul) self.assertEqual( list(reversed(ul)), [5,3,1]) ul.reverse(); ul.selfTest() self.assertEqual( ul, [5,3,1] )", "\"0,-1\" ) self.assertEqual( l, [10,12,1,4,7,8,0,-1] ) l[0] = \"5\" self.assertEqual(", "#//===========================================================================// def test_splitlist(self): l = SplitListType( List, \", \\t\\n\\r\" )(\"1,2,", "ul.selfTest() self.assertEqual( list(ul), [2,3,5,4]) ul.remove( 5 ); ul.selfTest() self.assertEqual( list(ul),", "3 ); ul.selfTest() ul.append( 1 ); ul.selfTest() self.assertEqual( list(ul), [1,3,2])", "[0,-1] ) self.assertEqual( l, [10,12,1,4,7,8, 0, -1] ) #//===========================================================================// if", "ul.selfTest() ul.append( 3 ); ul.selfTest() ul.append( 1 ); ul.selfTest() ul.append(", "l, ['1','4','7','8'] ) l.extend_front( \"10,12\" ) self.assertEqual( l, ['10','12','1','4','7','8'] )", "list(ul), [2,1,3,4,5]) ul.extend_front( [1,2,2,3,1,1,5,5] ); ul.selfTest() self.assertEqual( list(ul), [1,2,3,5,4]) self.assertEqual(", "\"5\" self.assertEqual( l, ['1','4','7','8'] ) l.extend_front( \"10,12\" ) self.assertEqual( l,", "); ul.selfTest() self.assertEqual( list(ul), [2,3,5,4]) ul.remove( 5 ); ul.selfTest() self.assertEqual(", "3 ); ul.selfTest() ul += [1,2,2,2,3,1,2,4,3,3,5,4,5,5]; ul.selfTest() self.assertEqual( list(ul), [1,2,3,4,5])", "os.path.normpath(os.path.join( os.path.dirname( __file__ ), '..') )) from aql_tests import skip,", "[1,2,3,4] ) self.assertEqual( l, \"1,2,3,4\" ) self.assertEqual( l, \"1 2", "#//===========================================================================// def test_valuelist(self): l = SplitListType( ValueListType( List, int ),", ") self.assertEqual( ul, UniqueList(ul) ) self.assertLess( UniqueList([1,2,2,2,3]), UniqueList([1,2,1,1,1,4]) ) self.assertLess(", "55 ); ul.selfTest() self.assertEqual( list(ul), [2,3,4]) self.assertEqual( ul.pop(), 4 );", "ul.selfTest() self.assertEqual( list(ul), [2,1,3,4,5]) ul.extend_front( [1,2,2,3,1,1,5,5] ); ul.selfTest() self.assertEqual( list(ul),", "ul.extend_front( [1,2,2,3,1,1,5,5] ); ul.selfTest() self.assertEqual( list(ul), [1,2,3,5,4]) self.assertEqual( list(ul), [1,2,3,5,4])", "test_list(self): l = List([1,2,3,4]) self.assertEqual( l, [1,2,3,4] ) l +=", "['1','2','3','4','7','8'] ) l -= \"2, 3\" self.assertEqual( l, ['1','4','7','8'] )", ") self.assertEqual( l, [10,12,1,4,7,8, 0, -1] ) #//===========================================================================// if __name__", "2 ); ul.selfTest() self.assertEqual( list(ul), [2,1,3]) ul.extend( [4,1,2,2,5] ); ul.selfTest()", "ValueListType( List, int ), \", \\t\\n\\r\" )(\"1,2, 3,,, \\n\\r\\t4\") self.assertEqual(", ") l -= 78 self.assertEqual( l, [1,2,3,4,7,8] ) l -=", "test_unique_list(self): ul = UniqueList( [1,2,3,2,1,3] ); ul.selfTest() self.assertEqual( ul, [2,3,1])", "l, [1,2,3,4] ) self.assertEqual( l, \"1,2,3,4\" ) self.assertEqual( l, \"1", "1 ); ul.selfTest() ul.append( 2 ); ul.selfTest() ul.append( 3 );", "l[0] = \"5\" self.assertEqual( l, [5,12,1,4,7,8,0,-1] ) #//===========================================================================// def test_list(self):", "); ul.selfTest() self.assertEqual( list(ul), [1,3,2]) ul.append_front( 2 ); ul.selfTest() self.assertEqual(", "): def test_unique_list(self): ul = UniqueList( [1,2,3,2,1,3] ); 
ul.selfTest() self.assertEqual(", "[2,3,5,4]) ul.remove( 5 ); ul.selfTest() self.assertEqual( list(ul), [2,3,4]) ul.remove( 55", "8] self.assertEqual( l, [1,2,3,4,7,8] ) l += 78 self.assertEqual( l,", "[10,12,1,4,7,8] ) l.extend( [0,-1] ) self.assertEqual( l, [10,12,1,4,7,8, 0, -1]", "self.assertEqual( l, ['10','12','1','4','7','8', '0', '-1'] ) #//===========================================================================// def test_valuelist(self): l", "self.assertEqual( list(ul), [1,3,2]) ul.append_front( 2 ); ul.selfTest() self.assertEqual( list(ul), [2,1,3])", "l -= \"5\" self.assertEqual( l, ['1','4','7','8'] ) l.extend_front( \"10,12\" )", ") ul.reverse(); ul.selfTest() self.assertEqual( str(ul), \"[1, 3, 5]\" ) self.assertEqual(", ") l -= \"2, 3\" self.assertEqual( l, ['1','4','7','8'] ) l", "aql.util_types import UniqueList, SplitListType, List, ValueListType #//===========================================================================// class TestListTypes( AqlTestCase", "l, ['1','2','3','4'] ) self.assertEqual( l, \"1,2,3,4\" ) self.assertEqual( l, \"1", "[1,2,3,4,7,8,78] ) l -= 78 self.assertEqual( l, [1,2,3,4,7,8] ) l", "self.assertEqual( ul, UniqueList(ul) ) self.assertLess( UniqueList([1,2,2,2,3]), UniqueList([1,2,1,1,1,4]) ) self.assertLess( UniqueList([1,2,2,2,3]),", ") self.assertEqual( ul, UniqueList([1, 3, 5]) ) self.assertEqual( ul, UniqueList(ul)", "self.assertEqual( ul, UniqueList([1, 3, 5]) ) self.assertEqual( ul, UniqueList(ul) )", "ul.extend( [4,1,2,2,5] ); ul.selfTest() self.assertEqual( list(ul), [2,1,3,4,5]) ul.extend_front( [1,2,2,3,1,1,5,5] );", "l -= 5 self.assertEqual( l, [1,4,7,8] ) l.extend_front( [10,12] )", "-= \"2, 3\" self.assertEqual( l, ['1','4','7','8'] ) l -= \"5\"", "self.assertEqual( l, [1,4,7,8] ) l -= 5 self.assertEqual( l, [1,4,7,8]", "78 self.assertEqual( l, [1,2,3,4,7,8,78] ) l -= 78 self.assertEqual( l,", "3, 5]) ) self.assertEqual( ul, UniqueList(ul) ) self.assertLess( UniqueList([1,2,2,2,3]), UniqueList([1,2,1,1,1,4])", "UniqueList([1, 3, 5]) ) self.assertEqual( ul, UniqueList(ul) ) self.assertLess( UniqueList([1,2,2,2,3]),", "l -= \"2, 3\" self.assertEqual( l, ['1','4','7','8'] ) l -=", "'-1'] ) #//===========================================================================// def test_valuelist(self): l = SplitListType( ValueListType( List,", "); ul.selfTest() ul.append( 3 ); ul.selfTest() ul.append( 1 ); ul.selfTest()", "def test_unique_list(self): ul = UniqueList( [1,2,3,2,1,3] ); ul.selfTest() self.assertEqual( ul,", "3, 5]\" ) self.assertEqual( ul, UniqueList([1, 3, 5]) ) self.assertEqual(", "self.assertEqual( ul[0], 1) self.assertEqual( ul[2], 5) self.assertEqual( ul[1], 3) self.assertIn(", "l, [10,12,1,4,7,8,0,-1] ) l[0] = \"5\" self.assertEqual( l, [5,12,1,4,7,8,0,-1] )", "5) self.assertEqual( ul[1], 3) self.assertIn( 1, ul) self.assertEqual( list(reversed(ul)), [5,3,1])", "l += [7, 8] self.assertEqual( l, ['1','2','3','4','7','8'] ) l +=", "__file__ ), '..') )) from aql_tests import skip, AqlTestCase, runLocalTests", ") #//===========================================================================// def test_valuelist(self): l = SplitListType( ValueListType( List, int", "list(ul), [2,3,4]) ul.remove( 55 ); ul.selfTest() self.assertEqual( list(ul), [2,3,4]) self.assertEqual(", "5 self.assertEqual( l, [1,4,7,8] ) l.extend_front( [10,12] ) self.assertEqual( l,", "\"1,2,3,4\" ) l += \"7, 8\" self.assertEqual( l, ['1','2','3','4','7','8'] )", "List, \", \\t\\n\\r\" )(\"1,2, 3,,, \\n\\r\\t4\") self.assertEqual( l, ['1','2','3','4'] 
)", "import timeit sys.path.insert( 0, os.path.normpath(os.path.join( os.path.dirname( __file__ ), '..') ))", "self.assertEqual( l, [1,2,3,4,7,8] ) l -= [2, 3] self.assertEqual( l,", "78 self.assertEqual( l, [1,2,3,4,7,8] ) l -= [2, 3] self.assertEqual(", "self.assertEqual( l, ['10','12','1','4','7','8'] ) l.extend( \"0,-1\" ) self.assertEqual( l, ['10','12','1','4','7','8'," ]
[ "def log(self, file_object, email, log_message, log_writer_id): self.now = datetime.now() self.date", "from src.utils import uploaded_file import os class App_Logger: def __init__(self):", "from datetime import datetime from src.utils import uploaded_file import os", "self.date = self.now.date() self.current_time = self.now.strftime(\"%H:%M:%S\") file_object.write( email+ \"_eda_\" +", "+ log_writer_id + \"\\t\\t\" +str(self.date) + \"/\" + str(self.current_time) +", "uploaded_file import os class App_Logger: def __init__(self): pass def log(self,", "datetime.now() self.date = self.now.date() self.current_time = self.now.strftime(\"%H:%M:%S\") file_object.write( email+ \"_eda_\"", "= self.now.date() self.current_time = self.now.strftime(\"%H:%M:%S\") file_object.write( email+ \"_eda_\" + log_writer_id", "self.now.date() self.current_time = self.now.strftime(\"%H:%M:%S\") file_object.write( email+ \"_eda_\" + log_writer_id +", "log(self, file_object, email, log_message, log_writer_id): self.now = datetime.now() self.date =", "self.now = datetime.now() self.date = self.now.date() self.current_time = self.now.strftime(\"%H:%M:%S\") file_object.write(", "log_message, log_writer_id): self.now = datetime.now() self.date = self.now.date() self.current_time =", "+str(self.date) + \"/\" + str(self.current_time) + \"\\t\\t\" +email+ \"\\t\\t\" +log_message", "class App_Logger: def __init__(self): pass def log(self, file_object, email, log_message,", "datetime import datetime from src.utils import uploaded_file import os class", "<reponame>swatishayna/OnlineEDAAutomation<filename>logger_application/logger.py from datetime import datetime from src.utils import uploaded_file import", "log_writer_id): self.now = datetime.now() self.date = self.now.date() self.current_time = self.now.strftime(\"%H:%M:%S\")", "datetime from src.utils import uploaded_file import os class App_Logger: def", "src.utils import uploaded_file import os class App_Logger: def __init__(self): pass", "import datetime from src.utils import uploaded_file import os class App_Logger:", "= datetime.now() self.date = self.now.date() self.current_time = self.now.strftime(\"%H:%M:%S\") file_object.write( email+", "email+ \"_eda_\" + log_writer_id + \"\\t\\t\" +str(self.date) + \"/\" +", "self.now.strftime(\"%H:%M:%S\") file_object.write( email+ \"_eda_\" + log_writer_id + \"\\t\\t\" +str(self.date) +", "import os class App_Logger: def __init__(self): pass def log(self, file_object,", "= self.now.strftime(\"%H:%M:%S\") file_object.write( email+ \"_eda_\" + log_writer_id + \"\\t\\t\" +str(self.date)", "log_writer_id + \"\\t\\t\" +str(self.date) + \"/\" + str(self.current_time) + \"\\t\\t\"", "email, log_message, log_writer_id): self.now = datetime.now() self.date = self.now.date() self.current_time", "pass def log(self, file_object, email, log_message, log_writer_id): self.now = datetime.now()", "\"_eda_\" + log_writer_id + \"\\t\\t\" +str(self.date) + \"/\" + str(self.current_time)", "self.current_time = self.now.strftime(\"%H:%M:%S\") file_object.write( email+ \"_eda_\" + log_writer_id + \"\\t\\t\"", "import uploaded_file import os class App_Logger: def __init__(self): pass def", "os class App_Logger: def __init__(self): pass def log(self, file_object, email,", "file_object.write( email+ \"_eda_\" + log_writer_id + \"\\t\\t\" +str(self.date) + \"/\"", "App_Logger: def __init__(self): pass def log(self, file_object, email, log_message, log_writer_id):", "+ \"/\" + str(self.current_time) + \"\\t\\t\" +email+ \"\\t\\t\" +log_message 
+\"\\n\")", "\"\\t\\t\" +str(self.date) + \"/\" + str(self.current_time) + \"\\t\\t\" +email+ \"\\t\\t\"", "__init__(self): pass def log(self, file_object, email, log_message, log_writer_id): self.now =", "+ \"\\t\\t\" +str(self.date) + \"/\" + str(self.current_time) + \"\\t\\t\" +email+", "def __init__(self): pass def log(self, file_object, email, log_message, log_writer_id): self.now", "file_object, email, log_message, log_writer_id): self.now = datetime.now() self.date = self.now.date()" ]
[ "# name of meta root dir META_DIR = \".metasync\" #", "Increase of Paxos proposal number PAXOS_PNUM_INC = 10 # authentication", "params KB = 1024 MB = 1024*KB GB = 1024*MB", "BLOB_UNIT = 32*MB # Increase of Paxos proposal number PAXOS_PNUM_INC", "blob size BLOB_UNIT = 32*MB # Increase of Paxos proposal", "root dir META_DIR = \".metasync\" # batching time for daemon", "32*MB # Increase of Paxos proposal number PAXOS_PNUM_INC = 10", "Paxos proposal number PAXOS_PNUM_INC = 10 # authentication directory import", "dir META_DIR = \".metasync\" # batching time for daemon SYNC_WAIT", "of Paxos proposal number PAXOS_PNUM_INC = 10 # authentication directory", "= 1024*KB GB = 1024*MB # name of meta root", "# Increase of Paxos proposal number PAXOS_PNUM_INC = 10 #", "META_DIR = \".metasync\" # batching time for daemon SYNC_WAIT =", "proposal number PAXOS_PNUM_INC = 10 # authentication directory import os", "= 1024*MB # name of meta root dir META_DIR =", "daemon SYNC_WAIT = 3 # blob size BLOB_UNIT = 32*MB", "for daemon SYNC_WAIT = 3 # blob size BLOB_UNIT =", "10 # authentication directory import os AUTH_DIR = os.path.join(os.path.expanduser(\"~\"), \".metasync\")", "time for daemon SYNC_WAIT = 3 # blob size BLOB_UNIT", "batching time for daemon SYNC_WAIT = 3 # blob size", "1024 MB = 1024*KB GB = 1024*MB # name of", "of meta root dir META_DIR = \".metasync\" # batching time", "1024*KB GB = 1024*MB # name of meta root dir", "= \".metasync\" # batching time for daemon SYNC_WAIT = 3", "# batching time for daemon SYNC_WAIT = 3 # blob", "GB = 1024*MB # name of meta root dir META_DIR", "name of meta root dir META_DIR = \".metasync\" # batching", "meta root dir META_DIR = \".metasync\" # batching time for", "size BLOB_UNIT = 32*MB # Increase of Paxos proposal number", "# blob size BLOB_UNIT = 32*MB # Increase of Paxos", "1024*MB # name of meta root dir META_DIR = \".metasync\"", "3 # blob size BLOB_UNIT = 32*MB # Increase of", "= 10 # authentication directory import os AUTH_DIR = os.path.join(os.path.expanduser(\"~\"),", "= 1024 MB = 1024*KB GB = 1024*MB # name", "= 32*MB # Increase of Paxos proposal number PAXOS_PNUM_INC =", "MB = 1024*KB GB = 1024*MB # name of meta", "number PAXOS_PNUM_INC = 10 # authentication directory import os AUTH_DIR", "config params KB = 1024 MB = 1024*KB GB =", "SYNC_WAIT = 3 # blob size BLOB_UNIT = 32*MB #", "PAXOS_PNUM_INC = 10 # authentication directory import os AUTH_DIR =", "# config params KB = 1024 MB = 1024*KB GB", "KB = 1024 MB = 1024*KB GB = 1024*MB #", "= 3 # blob size BLOB_UNIT = 32*MB # Increase", "\".metasync\" # batching time for daemon SYNC_WAIT = 3 #" ]
[ "test(\"()[]{}\", result=True) test(\"(]\", result=False) test(\"([)]\", result=False) test(\"{[]}\", result=True) test(\"\", result=True)", "unittest from py.tests.utils import test from py import valid_parentheses as", "@test(vp.Solution.is_valid) def test_valid_parentheses(self) -> None: test(\"()\", result=True) test(\"()[]{}\", result=True) test(\"(]\",", "test(\"([)]\", result=False) test(\"{[]}\", result=True) test(\"\", result=True) test(\")()\", result=False) test(\"(())((())))\", result=False)", "class TestValidParentheses(unittest.TestCase): @test(vp.Solution.is_valid) def test_valid_parentheses(self) -> None: test(\"()\", result=True) test(\"()[]{}\",", "from py import valid_parentheses as vp class TestValidParentheses(unittest.TestCase): @test(vp.Solution.is_valid) def", "test from py import valid_parentheses as vp class TestValidParentheses(unittest.TestCase): @test(vp.Solution.is_valid)", "valid_parentheses as vp class TestValidParentheses(unittest.TestCase): @test(vp.Solution.is_valid) def test_valid_parentheses(self) -> None:", "TestValidParentheses(unittest.TestCase): @test(vp.Solution.is_valid) def test_valid_parentheses(self) -> None: test(\"()\", result=True) test(\"()[]{}\", result=True)", "import valid_parentheses as vp class TestValidParentheses(unittest.TestCase): @test(vp.Solution.is_valid) def test_valid_parentheses(self) ->", "test(\"(]\", result=False) test(\"([)]\", result=False) test(\"{[]}\", result=True) test(\"\", result=True) test(\")()\", result=False)", "import unittest from py.tests.utils import test from py import valid_parentheses", "as vp class TestValidParentheses(unittest.TestCase): @test(vp.Solution.is_valid) def test_valid_parentheses(self) -> None: test(\"()\",", "result=False) test(\"([)]\", result=False) test(\"{[]}\", result=True) test(\"\", result=True) test(\")()\", result=False) test(\"(())((())))\",", "vp class TestValidParentheses(unittest.TestCase): @test(vp.Solution.is_valid) def test_valid_parentheses(self) -> None: test(\"()\", result=True)", "None: test(\"()\", result=True) test(\"()[]{}\", result=True) test(\"(]\", result=False) test(\"([)]\", result=False) test(\"{[]}\",", "py.tests.utils import test from py import valid_parentheses as vp class", "test_valid_parentheses(self) -> None: test(\"()\", result=True) test(\"()[]{}\", result=True) test(\"(]\", result=False) test(\"([)]\",", "py import valid_parentheses as vp class TestValidParentheses(unittest.TestCase): @test(vp.Solution.is_valid) def test_valid_parentheses(self)", "import test from py import valid_parentheses as vp class TestValidParentheses(unittest.TestCase):", "-> None: test(\"()\", result=True) test(\"()[]{}\", result=True) test(\"(]\", result=False) test(\"([)]\", result=False)", "test(\"()\", result=True) test(\"()[]{}\", result=True) test(\"(]\", result=False) test(\"([)]\", result=False) test(\"{[]}\", result=True)", "result=True) test(\"(]\", result=False) test(\"([)]\", result=False) test(\"{[]}\", result=True) test(\"\", result=True) test(\")()\",", "def test_valid_parentheses(self) -> None: test(\"()\", result=True) test(\"()[]{}\", result=True) test(\"(]\", result=False)", "from py.tests.utils import test from py import valid_parentheses as vp", "result=True) test(\"()[]{}\", result=True) test(\"(]\", result=False) test(\"([)]\", result=False) test(\"{[]}\", result=True) test(\"\"," ]
[ "inference on the image if self.model_type == ModelType.flyingthings: left_disparity, right_disparity", "# Perform inference on the image if self.model_type == ModelType.flyingthings:", "2) left_img = cv2.cvtColor(left_img, cv2.COLOR_BGR2GRAY) right_img = cv2.cvtColor(right_img, cv2.COLOR_BGR2GRAY) left_img", "= np.expand_dims(right_img,2) combined_img = np.concatenate((left_img, right_img), axis=-1) / 255.0 else:", "from hitnet.utils_hitnet import * drivingStereo_config = CameraConfig(0.546, 1000) class HitNet():", "axis=-1) / 255.0 else: # Shape (1, None, None, 6)", "(1, None, None, 2) left_img = cv2.cvtColor(left_img, cv2.COLOR_BGR2GRAY) right_img =", "left_disparity, right_disparity = self.inference(input_tensor) self.disparity_map = left_disparity else: self.disparity_map =", "right_img): return self.estimate_disparity(left_img, right_img) def initialize_model(self, model_path, model_type): self.model_type =", "== ModelType.flyingthings: left_disparity, right_disparity = self.inference(input_tensor) self.disparity_map = left_disparity else:", "import numpy as np import time import cv2 from hitnet.utils_hitnet", "self.timeLastPrediction = time.time() self.frameCounter = 0 self.camera_config = camera_config #", "self.camera_config = camera_config # Initialize model self.model = self.initialize_model(model_path, model_type)", "frozen graph to ConcreteFunctions if self.model_type == ModelType.flyingthings: model =", "<reponame>AchintyaSrivastava/HITNET-Stereo-Depth-estimation import tensorflow as tf import numpy as np import", "self.model_type = model_type with tf.io.gfile.GFile(model_path, \"rb\") as f: graph_def =", "return self.disparity_map def get_depth(self): return self.camera_config.f*self.camera_config.baseline/self.disparity_map def prepare_input(self, left_img, right_img):", "def prepare_input(self, left_img, right_img): if (self.model_type == ModelType.eth3d): # Shape", "cv2.COLOR_BGR2GRAY) right_img = cv2.cvtColor(right_img, cv2.COLOR_BGR2GRAY) left_img = np.expand_dims(left_img,2) right_img =", "ModelType.flyingthings: left_disparity, right_disparity = self.inference(input_tensor) self.disparity_map = left_disparity else: self.disparity_map", "model = wrap_frozen_graph(graph_def=graph_def, inputs=\"input:0\", outputs=[\"reference_output_disparity:0\",\"secondary_output_disparity:0\"]) else: model = wrap_frozen_graph(graph_def=graph_def, inputs=\"input:0\",", "0 self.timeLastPrediction = time.time() self.frameCounter = 0 self.camera_config = camera_config", "__call__(self, left_img, right_img): return self.estimate_disparity(left_img, right_img) def initialize_model(self, model_path, model_type):", "= self.inference(input_tensor) return self.disparity_map def get_depth(self): return self.camera_config.f*self.camera_config.baseline/self.disparity_map def prepare_input(self,", "numpy as np import time import cv2 from hitnet.utils_hitnet import", "self.inference(input_tensor) return self.disparity_map def get_depth(self): return self.camera_config.f*self.camera_config.baseline/self.disparity_map def prepare_input(self, left_img,", "255.0 return tf.convert_to_tensor(np.expand_dims(combined_img, 0), dtype=tf.float32) def inference(self, input_tensor): output =", "Initialize model self.model = self.initialize_model(model_path, model_type) def __call__(self, left_img, right_img):", "= cv2.cvtColor(right_img, cv2.COLOR_BGR2RGB) combined_img = np.concatenate((left_img, right_img), axis=-1) / 255.0", "np.concatenate((left_img, right_img), axis=-1) / 255.0 else: # Shape (1, None,", 
"right_img): if (self.model_type == ModelType.eth3d): # Shape (1, None, None,", "combined_img = np.concatenate((left_img, right_img), axis=-1) / 255.0 return tf.convert_to_tensor(np.expand_dims(combined_img, 0),", "# Shape (1, None, None, 6) left_img = cv2.cvtColor(left_img, cv2.COLOR_BGR2RGB)", "outputs=\"reference_output_disparity:0\") return model def estimate_disparity(self, left_img, right_img): input_tensor = self.prepare_input(left_img,", "as tf import numpy as np import time import cv2", "self.model_type == ModelType.flyingthings: model = wrap_frozen_graph(graph_def=graph_def, inputs=\"input:0\", outputs=[\"reference_output_disparity:0\",\"secondary_output_disparity:0\"]) else: model", "return self.estimate_disparity(left_img, right_img) def initialize_model(self, model_path, model_type): self.model_type = model_type", "self.initialize_model(model_path, model_type) def __call__(self, left_img, right_img): return self.estimate_disparity(left_img, right_img) def", "# Wrap frozen graph to ConcreteFunctions if self.model_type == ModelType.flyingthings:", "self.frameCounter = 0 self.camera_config = camera_config # Initialize model self.model", "Shape (1, None, None, 6) left_img = cv2.cvtColor(left_img, cv2.COLOR_BGR2RGB) right_img", "right_img = cv2.cvtColor(right_img, cv2.COLOR_BGR2GRAY) left_img = np.expand_dims(left_img,2) right_img = np.expand_dims(right_img,2)", "time import cv2 from hitnet.utils_hitnet import * drivingStereo_config = CameraConfig(0.546,", "on the image if self.model_type == ModelType.flyingthings: left_disparity, right_disparity =", "as np import time import cv2 from hitnet.utils_hitnet import *", "np.expand_dims(right_img,2) combined_img = np.concatenate((left_img, right_img), axis=-1) / 255.0 else: #", "right_img): input_tensor = self.prepare_input(left_img, right_img) # Perform inference on the", "= np.expand_dims(left_img,2) right_img = np.expand_dims(right_img,2) combined_img = np.concatenate((left_img, right_img), axis=-1)", "cv2.cvtColor(right_img, cv2.COLOR_BGR2RGB) combined_img = np.concatenate((left_img, right_img), axis=-1) / 255.0 return", "= self.initialize_model(model_path, model_type) def __call__(self, left_img, right_img): return self.estimate_disparity(left_img, right_img)", "model_path, model_type=ModelType.eth3d, camera_config=drivingStereo_config): self.fps = 0 self.timeLastPrediction = time.time() self.frameCounter", "model_path, model_type): self.model_type = model_type with tf.io.gfile.GFile(model_path, \"rb\") as f:", "image if self.model_type == ModelType.flyingthings: left_disparity, right_disparity = self.inference(input_tensor) self.disparity_map", "tf.convert_to_tensor(np.expand_dims(combined_img, 0), dtype=tf.float32) def inference(self, input_tensor): output = self.model(input_tensor) return", "camera_config # Initialize model self.model = self.initialize_model(model_path, model_type) def __call__(self,", "(1, None, None, 6) left_img = cv2.cvtColor(left_img, cv2.COLOR_BGR2RGB) right_img =", "self.estimate_disparity(left_img, right_img) def initialize_model(self, model_path, model_type): self.model_type = model_type with", "right_img) def initialize_model(self, model_path, model_type): self.model_type = model_type with tf.io.gfile.GFile(model_path,", "ModelType.eth3d): # Shape (1, None, None, 2) left_img = cv2.cvtColor(left_img,", "(self.model_type == ModelType.eth3d): # Shape (1, None, None, 2) left_img", "import cv2 from hitnet.utils_hitnet import * drivingStereo_config = CameraConfig(0.546, 1000)", "right_img = 
np.expand_dims(right_img,2) combined_img = np.concatenate((left_img, right_img), axis=-1) / 255.0", "tf import numpy as np import time import cv2 from", "= CameraConfig(0.546, 1000) class HitNet(): def __init__(self, model_path, model_type=ModelType.eth3d, camera_config=drivingStereo_config):", "None, None, 6) left_img = cv2.cvtColor(left_img, cv2.COLOR_BGR2RGB) right_img = cv2.cvtColor(right_img,", "HitNet(): def __init__(self, model_path, model_type=ModelType.eth3d, camera_config=drivingStereo_config): self.fps = 0 self.timeLastPrediction", "else: self.disparity_map = self.inference(input_tensor) return self.disparity_map def get_depth(self): return self.camera_config.f*self.camera_config.baseline/self.disparity_map", "outputs=[\"reference_output_disparity:0\",\"secondary_output_disparity:0\"]) else: model = wrap_frozen_graph(graph_def=graph_def, inputs=\"input:0\", outputs=\"reference_output_disparity:0\") return model def", "cv2.COLOR_BGR2RGB) combined_img = np.concatenate((left_img, right_img), axis=-1) / 255.0 return tf.convert_to_tensor(np.expand_dims(combined_img,", "def get_depth(self): return self.camera_config.f*self.camera_config.baseline/self.disparity_map def prepare_input(self, left_img, right_img): if (self.model_type", "None, 6) left_img = cv2.cvtColor(left_img, cv2.COLOR_BGR2RGB) right_img = cv2.cvtColor(right_img, cv2.COLOR_BGR2RGB)", "left_img, right_img): input_tensor = self.prepare_input(left_img, right_img) # Perform inference on", "right_img) # Perform inference on the image if self.model_type ==", "/ 255.0 return tf.convert_to_tensor(np.expand_dims(combined_img, 0), dtype=tf.float32) def inference(self, input_tensor): output", "= time.time() self.frameCounter = 0 self.camera_config = camera_config # Initialize", "the image if self.model_type == ModelType.flyingthings: left_disparity, right_disparity = self.inference(input_tensor)", "self.fps = 0 self.timeLastPrediction = time.time() self.frameCounter = 0 self.camera_config", "255.0 else: # Shape (1, None, None, 6) left_img =", "= tf.compat.v1.GraphDef() loaded = graph_def.ParseFromString(f.read()) # Wrap frozen graph to", "right_img), axis=-1) / 255.0 return tf.convert_to_tensor(np.expand_dims(combined_img, 0), dtype=tf.float32) def inference(self,", "right_disparity = self.inference(input_tensor) self.disparity_map = left_disparity else: self.disparity_map = self.inference(input_tensor)", "left_img, right_img): if (self.model_type == ModelType.eth3d): # Shape (1, None,", "== ModelType.eth3d): # Shape (1, None, None, 2) left_img =", "model_type) def __call__(self, left_img, right_img): return self.estimate_disparity(left_img, right_img) def initialize_model(self,", "hitnet.utils_hitnet import * drivingStereo_config = CameraConfig(0.546, 1000) class HitNet(): def", "ModelType.flyingthings: model = wrap_frozen_graph(graph_def=graph_def, inputs=\"input:0\", outputs=[\"reference_output_disparity:0\",\"secondary_output_disparity:0\"]) else: model = wrap_frozen_graph(graph_def=graph_def,", "Shape (1, None, None, 2) left_img = cv2.cvtColor(left_img, cv2.COLOR_BGR2GRAY) right_img", "graph_def.ParseFromString(f.read()) # Wrap frozen graph to ConcreteFunctions if self.model_type ==", "self.disparity_map = left_disparity else: self.disparity_map = self.inference(input_tensor) return self.disparity_map def", "return tf.convert_to_tensor(np.expand_dims(combined_img, 0), dtype=tf.float32) def inference(self, input_tensor): output = self.model(input_tensor)", "right_img), axis=-1) / 255.0 else: # Shape (1, None, None,", "np 
import time import cv2 from hitnet.utils_hitnet import * drivingStereo_config", "model def estimate_disparity(self, left_img, right_img): input_tensor = self.prepare_input(left_img, right_img) #", "= 0 self.camera_config = camera_config # Initialize model self.model =", "input_tensor = self.prepare_input(left_img, right_img) # Perform inference on the image", "tf.compat.v1.GraphDef() loaded = graph_def.ParseFromString(f.read()) # Wrap frozen graph to ConcreteFunctions", "return self.camera_config.f*self.camera_config.baseline/self.disparity_map def prepare_input(self, left_img, right_img): if (self.model_type == ModelType.eth3d):", "else: # Shape (1, None, None, 6) left_img = cv2.cvtColor(left_img,", "left_img = cv2.cvtColor(left_img, cv2.COLOR_BGR2RGB) right_img = cv2.cvtColor(right_img, cv2.COLOR_BGR2RGB) combined_img =", "def initialize_model(self, model_path, model_type): self.model_type = model_type with tf.io.gfile.GFile(model_path, \"rb\")", "= np.concatenate((left_img, right_img), axis=-1) / 255.0 return tf.convert_to_tensor(np.expand_dims(combined_img, 0), dtype=tf.float32)", "cv2.cvtColor(right_img, cv2.COLOR_BGR2GRAY) left_img = np.expand_dims(left_img,2) right_img = np.expand_dims(right_img,2) combined_img =", "import * drivingStereo_config = CameraConfig(0.546, 1000) class HitNet(): def __init__(self,", "6) left_img = cv2.cvtColor(left_img, cv2.COLOR_BGR2RGB) right_img = cv2.cvtColor(right_img, cv2.COLOR_BGR2RGB) combined_img", "self.inference(input_tensor) self.disparity_map = left_disparity else: self.disparity_map = self.inference(input_tensor) return self.disparity_map", "tf.io.gfile.GFile(model_path, \"rb\") as f: graph_def = tf.compat.v1.GraphDef() loaded = graph_def.ParseFromString(f.read())", "\"rb\") as f: graph_def = tf.compat.v1.GraphDef() loaded = graph_def.ParseFromString(f.read()) #", "as f: graph_def = tf.compat.v1.GraphDef() loaded = graph_def.ParseFromString(f.read()) # Wrap", "prepare_input(self, left_img, right_img): if (self.model_type == ModelType.eth3d): # Shape (1,", "get_depth(self): return self.camera_config.f*self.camera_config.baseline/self.disparity_map def prepare_input(self, left_img, right_img): if (self.model_type ==", "initialize_model(self, model_path, model_type): self.model_type = model_type with tf.io.gfile.GFile(model_path, \"rb\") as", "/ 255.0 else: # Shape (1, None, None, 6) left_img", "self.disparity_map def get_depth(self): return self.camera_config.f*self.camera_config.baseline/self.disparity_map def prepare_input(self, left_img, right_img): if", "if self.model_type == ModelType.flyingthings: left_disparity, right_disparity = self.inference(input_tensor) self.disparity_map =", "import time import cv2 from hitnet.utils_hitnet import * drivingStereo_config =", "import tensorflow as tf import numpy as np import time", "wrap_frozen_graph(graph_def=graph_def, inputs=\"input:0\", outputs=[\"reference_output_disparity:0\",\"secondary_output_disparity:0\"]) else: model = wrap_frozen_graph(graph_def=graph_def, inputs=\"input:0\", outputs=\"reference_output_disparity:0\") return", "graph_def = tf.compat.v1.GraphDef() loaded = graph_def.ParseFromString(f.read()) # Wrap frozen graph", "camera_config=drivingStereo_config): self.fps = 0 self.timeLastPrediction = time.time() self.frameCounter = 0", "cv2.COLOR_BGR2GRAY) left_img = np.expand_dims(left_img,2) right_img = np.expand_dims(right_img,2) combined_img = np.concatenate((left_img,", "cv2 from hitnet.utils_hitnet import * drivingStereo_config = CameraConfig(0.546, 1000) class", "def 
__init__(self, model_path, model_type=ModelType.eth3d, camera_config=drivingStereo_config): self.fps = 0 self.timeLastPrediction =", "graph to ConcreteFunctions if self.model_type == ModelType.flyingthings: model = wrap_frozen_graph(graph_def=graph_def,", "to ConcreteFunctions if self.model_type == ModelType.flyingthings: model = wrap_frozen_graph(graph_def=graph_def, inputs=\"input:0\",", "cv2.cvtColor(left_img, cv2.COLOR_BGR2RGB) right_img = cv2.cvtColor(right_img, cv2.COLOR_BGR2RGB) combined_img = np.concatenate((left_img, right_img),", "model = wrap_frozen_graph(graph_def=graph_def, inputs=\"input:0\", outputs=\"reference_output_disparity:0\") return model def estimate_disparity(self, left_img,", "if (self.model_type == ModelType.eth3d): # Shape (1, None, None, 2)", "# Initialize model self.model = self.initialize_model(model_path, model_type) def __call__(self, left_img,", "Perform inference on the image if self.model_type == ModelType.flyingthings: left_disparity,", "= graph_def.ParseFromString(f.read()) # Wrap frozen graph to ConcreteFunctions if self.model_type", "= self.inference(input_tensor) self.disparity_map = left_disparity else: self.disparity_map = self.inference(input_tensor) return", "CameraConfig(0.546, 1000) class HitNet(): def __init__(self, model_path, model_type=ModelType.eth3d, camera_config=drivingStereo_config): self.fps", "= left_disparity else: self.disparity_map = self.inference(input_tensor) return self.disparity_map def get_depth(self):", "def __call__(self, left_img, right_img): return self.estimate_disparity(left_img, right_img) def initialize_model(self, model_path,", "self.prepare_input(left_img, right_img) # Perform inference on the image if self.model_type", "inputs=\"input:0\", outputs=\"reference_output_disparity:0\") return model def estimate_disparity(self, left_img, right_img): input_tensor =", "f: graph_def = tf.compat.v1.GraphDef() loaded = graph_def.ParseFromString(f.read()) # Wrap frozen", "= cv2.cvtColor(left_img, cv2.COLOR_BGR2RGB) right_img = cv2.cvtColor(right_img, cv2.COLOR_BGR2RGB) combined_img = np.concatenate((left_img,", "combined_img = np.concatenate((left_img, right_img), axis=-1) / 255.0 else: # Shape", "self.model = self.initialize_model(model_path, model_type) def __call__(self, left_img, right_img): return self.estimate_disparity(left_img,", "== ModelType.flyingthings: model = wrap_frozen_graph(graph_def=graph_def, inputs=\"input:0\", outputs=[\"reference_output_disparity:0\",\"secondary_output_disparity:0\"]) else: model =", "inputs=\"input:0\", outputs=[\"reference_output_disparity:0\",\"secondary_output_disparity:0\"]) else: model = wrap_frozen_graph(graph_def=graph_def, inputs=\"input:0\", outputs=\"reference_output_disparity:0\") return model", "= self.prepare_input(left_img, right_img) # Perform inference on the image if", "right_img = cv2.cvtColor(right_img, cv2.COLOR_BGR2RGB) combined_img = np.concatenate((left_img, right_img), axis=-1) /", "self.model_type == ModelType.flyingthings: left_disparity, right_disparity = self.inference(input_tensor) self.disparity_map = left_disparity", "loaded = graph_def.ParseFromString(f.read()) # Wrap frozen graph to ConcreteFunctions if", "= cv2.cvtColor(right_img, cv2.COLOR_BGR2GRAY) left_img = np.expand_dims(left_img,2) right_img = np.expand_dims(right_img,2) combined_img", "1000) class HitNet(): def __init__(self, model_path, model_type=ModelType.eth3d, camera_config=drivingStereo_config): self.fps =", "time.time() self.frameCounter = 0 self.camera_config = camera_config # Initialize 
model", "def estimate_disparity(self, left_img, right_img): input_tensor = self.prepare_input(left_img, right_img) # Perform", "else: model = wrap_frozen_graph(graph_def=graph_def, inputs=\"input:0\", outputs=\"reference_output_disparity:0\") return model def estimate_disparity(self,", "np.expand_dims(left_img,2) right_img = np.expand_dims(right_img,2) combined_img = np.concatenate((left_img, right_img), axis=-1) /", "left_img, right_img): return self.estimate_disparity(left_img, right_img) def initialize_model(self, model_path, model_type): self.model_type", "0), dtype=tf.float32) def inference(self, input_tensor): output = self.model(input_tensor) return np.squeeze(output)", "model self.model = self.initialize_model(model_path, model_type) def __call__(self, left_img, right_img): return", "Wrap frozen graph to ConcreteFunctions if self.model_type == ModelType.flyingthings: model", "if self.model_type == ModelType.flyingthings: model = wrap_frozen_graph(graph_def=graph_def, inputs=\"input:0\", outputs=[\"reference_output_disparity:0\",\"secondary_output_disparity:0\"]) else:", "model_type=ModelType.eth3d, camera_config=drivingStereo_config): self.fps = 0 self.timeLastPrediction = time.time() self.frameCounter =", "axis=-1) / 255.0 return tf.convert_to_tensor(np.expand_dims(combined_img, 0), dtype=tf.float32) def inference(self, input_tensor):", "cv2.cvtColor(left_img, cv2.COLOR_BGR2GRAY) right_img = cv2.cvtColor(right_img, cv2.COLOR_BGR2GRAY) left_img = np.expand_dims(left_img,2) right_img", "with tf.io.gfile.GFile(model_path, \"rb\") as f: graph_def = tf.compat.v1.GraphDef() loaded =", "self.disparity_map = self.inference(input_tensor) return self.disparity_map def get_depth(self): return self.camera_config.f*self.camera_config.baseline/self.disparity_map def", "* drivingStereo_config = CameraConfig(0.546, 1000) class HitNet(): def __init__(self, model_path,", "= camera_config # Initialize model self.model = self.initialize_model(model_path, model_type) def", "left_img = np.expand_dims(left_img,2) right_img = np.expand_dims(right_img,2) combined_img = np.concatenate((left_img, right_img),", "left_disparity else: self.disparity_map = self.inference(input_tensor) return self.disparity_map def get_depth(self): return", "wrap_frozen_graph(graph_def=graph_def, inputs=\"input:0\", outputs=\"reference_output_disparity:0\") return model def estimate_disparity(self, left_img, right_img): input_tensor", "model_type): self.model_type = model_type with tf.io.gfile.GFile(model_path, \"rb\") as f: graph_def", "# Shape (1, None, None, 2) left_img = cv2.cvtColor(left_img, cv2.COLOR_BGR2GRAY)", "= 0 self.timeLastPrediction = time.time() self.frameCounter = 0 self.camera_config =", "model_type with tf.io.gfile.GFile(model_path, \"rb\") as f: graph_def = tf.compat.v1.GraphDef() loaded", "self.camera_config.f*self.camera_config.baseline/self.disparity_map def prepare_input(self, left_img, right_img): if (self.model_type == ModelType.eth3d): #", "None, 2) left_img = cv2.cvtColor(left_img, cv2.COLOR_BGR2GRAY) right_img = cv2.cvtColor(right_img, cv2.COLOR_BGR2GRAY)", "= cv2.cvtColor(left_img, cv2.COLOR_BGR2GRAY) right_img = cv2.cvtColor(right_img, cv2.COLOR_BGR2GRAY) left_img = np.expand_dims(left_img,2)", "estimate_disparity(self, left_img, right_img): input_tensor = self.prepare_input(left_img, right_img) # Perform inference", "cv2.COLOR_BGR2RGB) right_img = cv2.cvtColor(right_img, cv2.COLOR_BGR2RGB) combined_img = np.concatenate((left_img, right_img), axis=-1)", "class HitNet(): def __init__(self, 
model_path, model_type=ModelType.eth3d, camera_config=drivingStereo_config): self.fps = 0", "= model_type with tf.io.gfile.GFile(model_path, \"rb\") as f: graph_def = tf.compat.v1.GraphDef()", "return model def estimate_disparity(self, left_img, right_img): input_tensor = self.prepare_input(left_img, right_img)", "left_img = cv2.cvtColor(left_img, cv2.COLOR_BGR2GRAY) right_img = cv2.cvtColor(right_img, cv2.COLOR_BGR2GRAY) left_img =", "= np.concatenate((left_img, right_img), axis=-1) / 255.0 else: # Shape (1,", "np.concatenate((left_img, right_img), axis=-1) / 255.0 return tf.convert_to_tensor(np.expand_dims(combined_img, 0), dtype=tf.float32) def", "__init__(self, model_path, model_type=ModelType.eth3d, camera_config=drivingStereo_config): self.fps = 0 self.timeLastPrediction = time.time()", "ConcreteFunctions if self.model_type == ModelType.flyingthings: model = wrap_frozen_graph(graph_def=graph_def, inputs=\"input:0\", outputs=[\"reference_output_disparity:0\",\"secondary_output_disparity:0\"])", "= wrap_frozen_graph(graph_def=graph_def, inputs=\"input:0\", outputs=\"reference_output_disparity:0\") return model def estimate_disparity(self, left_img, right_img):", "None, None, 2) left_img = cv2.cvtColor(left_img, cv2.COLOR_BGR2GRAY) right_img = cv2.cvtColor(right_img,", "drivingStereo_config = CameraConfig(0.546, 1000) class HitNet(): def __init__(self, model_path, model_type=ModelType.eth3d,", "0 self.camera_config = camera_config # Initialize model self.model = self.initialize_model(model_path,", "= wrap_frozen_graph(graph_def=graph_def, inputs=\"input:0\", outputs=[\"reference_output_disparity:0\",\"secondary_output_disparity:0\"]) else: model = wrap_frozen_graph(graph_def=graph_def, inputs=\"input:0\", outputs=\"reference_output_disparity:0\")", "tensorflow as tf import numpy as np import time import" ]
[ "\"Intercept\" # Group to which the plugin belongs to def", "field_kwargs = { 'required': self.data.required, 'label': self.data.label, 'widget': forms.widgets.NumberInput(attrs={}), }", "your current address?\" form = HouseholdTenureForm group = \"Intercept\" #", "plugin belongs to def get_form_field_instances(self, request=None, form_entry=None, form_element_entries=None, **kwargs): field_kwargs", "= { 'required': self.data.required, 'label': self.data.label, 'widget': forms.widgets.NumberInput(attrs={}), } return", "form_element_entries=None, **kwargs): field_kwargs = { 'required': self.data.required, 'label': self.data.label, 'widget':", "Group to which the plugin belongs to def get_form_field_instances(self, request=None,", "import FormFieldPlugin, form_element_plugin_registry from .forms import HouseholdTenureForm class HouseholdTenurePlugin(FormFieldPlugin): \"\"\"HouseholdTenurePlugin.\"\"\"", "belongs to def get_form_field_instances(self, request=None, form_entry=None, form_element_entries=None, **kwargs): field_kwargs =", "the plugin belongs to def get_form_field_instances(self, request=None, form_entry=None, form_element_entries=None, **kwargs):", "def get_form_field_instances(self, request=None, form_entry=None, form_element_entries=None, **kwargs): field_kwargs = { 'required':", "'label': self.data.label, 'widget': forms.widgets.NumberInput(attrs={}), } return [(self.data.name, forms.IntegerField, field_kwargs)] form_element_plugin_registry.register(HouseholdTenurePlugin)", "FormFieldPlugin, form_element_plugin_registry from .forms import HouseholdTenureForm class HouseholdTenurePlugin(FormFieldPlugin): \"\"\"HouseholdTenurePlugin.\"\"\" uid", "to which the plugin belongs to def get_form_field_instances(self, request=None, form_entry=None,", "\"\"\"HouseholdTenurePlugin.\"\"\" uid = \"household_tenure\" name = \"What year did you", "did you move into your current address?\" form = HouseholdTenureForm", "= \"What year did you move into your current address?\"", "you move into your current address?\" form = HouseholdTenureForm group", "address?\" form = HouseholdTenureForm group = \"Intercept\" # Group to", "{ 'required': self.data.required, 'label': self.data.label, 'widget': forms.widgets.NumberInput(attrs={}), } return [(self.data.name,", "HouseholdTenureForm group = \"Intercept\" # Group to which the plugin", "= \"household_tenure\" name = \"What year did you move into", "self.data.required, 'label': self.data.label, 'widget': forms.widgets.NumberInput(attrs={}), } return [(self.data.name, forms.IntegerField, field_kwargs)]", "year did you move into your current address?\" form =", "to def get_form_field_instances(self, request=None, form_entry=None, form_element_entries=None, **kwargs): field_kwargs = {", "'required': self.data.required, 'label': self.data.label, 'widget': forms.widgets.NumberInput(attrs={}), } return [(self.data.name, forms.IntegerField,", "from .forms import HouseholdTenureForm class HouseholdTenurePlugin(FormFieldPlugin): \"\"\"HouseholdTenurePlugin.\"\"\" uid = \"household_tenure\"", "forms from fobi.base import FormFieldPlugin, form_element_plugin_registry from .forms import HouseholdTenureForm", "\"What year did you move into your current address?\" form", "= \"Intercept\" # Group to which the plugin belongs to", ".forms import HouseholdTenureForm class HouseholdTenurePlugin(FormFieldPlugin): \"\"\"HouseholdTenurePlugin.\"\"\" uid = \"household_tenure\" name", "uid = \"household_tenure\" name = \"What year did you move", 
"form_element_plugin_registry from .forms import HouseholdTenureForm class HouseholdTenurePlugin(FormFieldPlugin): \"\"\"HouseholdTenurePlugin.\"\"\" uid =", "import HouseholdTenureForm class HouseholdTenurePlugin(FormFieldPlugin): \"\"\"HouseholdTenurePlugin.\"\"\" uid = \"household_tenure\" name =", "\"household_tenure\" name = \"What year did you move into your", "from django import forms from fobi.base import FormFieldPlugin, form_element_plugin_registry from", "import forms from fobi.base import FormFieldPlugin, form_element_plugin_registry from .forms import", "= HouseholdTenureForm group = \"Intercept\" # Group to which the", "django import forms from fobi.base import FormFieldPlugin, form_element_plugin_registry from .forms", "**kwargs): field_kwargs = { 'required': self.data.required, 'label': self.data.label, 'widget': forms.widgets.NumberInput(attrs={}),", "from fobi.base import FormFieldPlugin, form_element_plugin_registry from .forms import HouseholdTenureForm class", "HouseholdTenureForm class HouseholdTenurePlugin(FormFieldPlugin): \"\"\"HouseholdTenurePlugin.\"\"\" uid = \"household_tenure\" name = \"What", "HouseholdTenurePlugin(FormFieldPlugin): \"\"\"HouseholdTenurePlugin.\"\"\" uid = \"household_tenure\" name = \"What year did", "name = \"What year did you move into your current", "request=None, form_entry=None, form_element_entries=None, **kwargs): field_kwargs = { 'required': self.data.required, 'label':", "form_entry=None, form_element_entries=None, **kwargs): field_kwargs = { 'required': self.data.required, 'label': self.data.label,", "fobi.base import FormFieldPlugin, form_element_plugin_registry from .forms import HouseholdTenureForm class HouseholdTenurePlugin(FormFieldPlugin):", "class HouseholdTenurePlugin(FormFieldPlugin): \"\"\"HouseholdTenurePlugin.\"\"\" uid = \"household_tenure\" name = \"What year", "current address?\" form = HouseholdTenureForm group = \"Intercept\" # Group", "form = HouseholdTenureForm group = \"Intercept\" # Group to which", "into your current address?\" form = HouseholdTenureForm group = \"Intercept\"", "# Group to which the plugin belongs to def get_form_field_instances(self,", "which the plugin belongs to def get_form_field_instances(self, request=None, form_entry=None, form_element_entries=None,", "get_form_field_instances(self, request=None, form_entry=None, form_element_entries=None, **kwargs): field_kwargs = { 'required': self.data.required,", "move into your current address?\" form = HouseholdTenureForm group =", "group = \"Intercept\" # Group to which the plugin belongs" ]
[ "<gh_stars>0 __all__ = ['EnemyBucketWithStar', 'Nut', 'Beam', 'Enemy', 'Friend', 'Hero', 'Launcher',", "'Enemy', 'Friend', 'Hero', 'Launcher', 'Rotor', 'SpikeyBuddy', 'Star', 'Wizard', 'EnemyEquipedRotor', 'CyclingEnemyObject',", "['EnemyBucketWithStar', 'Nut', 'Beam', 'Enemy', 'Friend', 'Hero', 'Launcher', 'Rotor', 'SpikeyBuddy', 'Star',", "= ['EnemyBucketWithStar', 'Nut', 'Beam', 'Enemy', 'Friend', 'Hero', 'Launcher', 'Rotor', 'SpikeyBuddy',", "__all__ = ['EnemyBucketWithStar', 'Nut', 'Beam', 'Enemy', 'Friend', 'Hero', 'Launcher', 'Rotor',", "'Friend', 'Hero', 'Launcher', 'Rotor', 'SpikeyBuddy', 'Star', 'Wizard', 'EnemyEquipedRotor', 'CyclingEnemyObject', 'Joints',", "'Hero', 'Launcher', 'Rotor', 'SpikeyBuddy', 'Star', 'Wizard', 'EnemyEquipedRotor', 'CyclingEnemyObject', 'Joints', 'Bomb',", "'Nut', 'Beam', 'Enemy', 'Friend', 'Hero', 'Launcher', 'Rotor', 'SpikeyBuddy', 'Star', 'Wizard',", "'Beam', 'Enemy', 'Friend', 'Hero', 'Launcher', 'Rotor', 'SpikeyBuddy', 'Star', 'Wizard', 'EnemyEquipedRotor',", "'Launcher', 'Rotor', 'SpikeyBuddy', 'Star', 'Wizard', 'EnemyEquipedRotor', 'CyclingEnemyObject', 'Joints', 'Bomb', 'Contacts']" ]
[ "torch.nn.utils.rnn import pack_padded_sequence fine_tune_encoder = False # Read word map", "errD ####################################################### # (3) Update G network: maximize log(D(G(z))) ######################################################", "print('Load %s_%d.pth' % (cfg.TRAIN.NET_D, i)) state_dict = torch.load('%s%d.pth' % (cfg.TRAIN.NET_D,", "Show, Attend, and Tell Dataloader train_loader = torch.utils.data.DataLoader( CaptionDataset(data_folder, data_name,", "import Image, ImageFont, ImageDraw from copy import deepcopy from miscc.config", "1] by function vutils.save_image real_img = imgs_tcpu[-1][0:num] vutils.save_image( real_img, '%s/real_samples.png'", "errD = self.train_Dnet(i, count) errD_total += errD ####################################################### # (3)", "1 iend = s_tmp.rfind('.') iteration = int(s_tmp[istart:iend]) s_tmp = s_tmp[:s_tmp.rfind('/')]", "SAT model for idx in range(len(self.fake_imgs)): img = encoder(self.fake_imgs[idx]) scores,", "decode_lengths, batch_first=True).cuda() SATloss += self.SATcriterion(scores, targets) + 1 * ((1.", "None SATloss = 0 # Compute the SAT loss after", "return np.mean(scores), np.std(scores) def load_network(gpus): netG = G_NET() netG.apply(weights_init) netG", "% (s_tmp, imsize) super_img = [] for j in range(num_sentences):", "[batch_size, 3, 128, 128] # self.fake_imgs[2].shape = [batch_size, 3, 256,", "is not None: encoder_optimizer.zero_grad() ####################################################### # (2) Update D network", "netD(wrong_imgs, mu.detach()) fake_logits = netD(fake_imgs.detach(), mu.detach()) # errD_real = criterion(real_logits[0],", "errG_total.item(), kl_loss.item(), end_t - start_t)) save_model(self.netG, avg_param_G, self.netsD, count, self.model_dir)", "= summary.scalar('D_loss%d' % idx, errD.item()) self.summary_writer.add_summary(summary_D, count) return errD def", "as np import os import time from PIL import Image,", "functions ################### def compute_mean_covariance(img): batch_size = img.size(0) channel_num = img.size(1)", "%.4f D(wrong):%.4f D(fake) %.4f % (epoch, self.max_epoch, self.num_batches, errD_total.item(), errG_total.item(),", "(epoch, self.max_epoch, self.num_batches, errD_total.item(), errG_total.item(), kl_loss.item(), end_t - start_t)) save_model(self.netG,", "count = start_count start_epoch = start_count // (self.num_batches) for epoch", "count % 100 batch_size = self.real_imgs[0].size(0) criterion, mu = self.criterion,", "= data_1 ####################################################### # (0) Prepare training data ###################################################### self.imgs_tcpu,", "real_img_set = vutils.make_grid(real_img).numpy() real_img_set = np.transpose(real_img_set, (1, 2, 0)) real_img_set", "cfg.TREE.BRANCH_NUM > 2: netsD.append(D_NET256()) if cfg.TREE.BRANCH_NUM > 3: netsD.append(D_NET512()) if", "i, 256) # self.save_singleimages(fake_imgs[-2], filenames, # save_dir, split_dir, i, 128)", "= np.mean(np.sum(kl, 1)) scores.append(np.exp(kl)) return np.mean(scores), np.std(scores) def negative_log_posterior_probability(predictions, num_splits=1):", "(*) Save Images/Log/Model per SNAPSHOT_INTERVAL: ###################################################### if count % cfg.TRAIN.SNAPSHOT_INTERVAL", "= part * \\ (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0))) kl", "self.mu, self.logvar real_labels = self.real_labels[:batch_size] for i in range(self.num_Ds): outputs", "torch.save( netD.state_dict(), '%s/netD%d.pth' % (model_dir, i)) print('Save G/Ds 
models.') def", "Combine with G and SAT first, then back propagation errG_total", "'valid' netG = G_NET() netG.apply(weights_init) netG = torch.nn.DataParallel(netG, device_ids=self.gpus) print(netG)", "mu^2 - sigma^2) KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar) KLD = torch.mean(KLD_element).mul_(-0.5) return", "like_mu1 = cfg.TRAIN.COEFF.COLOR_LOSS * nn.MSELoss()(mu1, mu2) like_cov1 = cfg.TRAIN.COEFF.COLOR_LOSS *", "evaluate(self, split_dir): if cfg.TRAIN.NET_G == '': print('Error: the path for", "from model import G_NET, D_NET64, D_NET128, D_NET256, D_NET512, D_NET1024, INCEPTION_V3", "if count % cfg.TRAIN.SNAPSHOT_INTERVAL == 0: save_model(self.netG, avg_param_G, self.netsD, count,", ":], t_embeddings.size(1)) embedding_dim = t_embeddings.size(1) batch_size = imgs[0].size(0) noise.data.resize_(batch_size, nz)", "% (model_dir, i)) print('Save G/Ds models.') def save_real(imgs_tcpu, image_dir): num", "0: summary_D = summary.scalar('D_loss', errD_total.item()) summary_G = summary.scalar('G_loss', errG_total.item()) summary_KL", "imgs_tcpu[-1][0:num] vutils.save_image( real_img, '%s/real_samples.png' % (image_dir), normalize=True) real_img_set = vutils.make_grid(real_img).numpy()", "from SATmodels import Encoder, DecoderWithAttention from torch.nn.utils.rnn import pack_padded_sequence fine_tune_encoder", "%.4f % (epoch, self.max_epoch, self.num_batches, errD_total.item(), errG_total.item(), kl_loss.item(), end_t -", "self.txt_embedding = self.prepare_data(data) # Testing line for real samples if", "covariance / num_pixels return mu, covariance def KL_loss(mu, logvar): #", "batch_size * num_pixels * channel_num img_hat_transpose = img_hat.transpose(1, 2) #", "# for real real_logits = netD(real_imgs, mu.detach()) wrong_logits = netD(wrong_imgs,", "caplens = data[1] data = data_1 ####################################################### # (0) Prepare", "transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # Show, Attend, and", "in zip(model.parameters(), new_param): p.data.copy_(new_p) def copy_G_params(model): flatten = deepcopy(list(p.data for", "= copy_G_params(self.netG) self.optimizerG, self.optimizersD = \\ define_optimizers(self.netG, self.netsD) self.criterion =", "elif classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) elif classname.find('Linear') !=", "cfg.TRAIN.COEFF.COLOR_LOSS * nn.MSELoss()(mu1, mu2) like_cov1 = cfg.TRAIN.COEFF.COLOR_LOSS * 5 *", "changed to [0, 1] by function vutils.save_image real_img = imgs_tcpu[-1][0:num]", "# batch_size * channel_num * channel_num covariance = torch.bmm(img_hat, img_hat_transpose)", "= criterion(real_logits[0], real_labels) errD_wrong = criterion(wrong_logits[0], fake_labels) errD_fake = criterion(fake_logits[0],", "= count % 100 batch_size = self.real_imgs[0].size(0) criterion, mu, logvar", "self.netsD, count, self.model_dir) # Save images backup_para = copy_G_params(self.netG) load_params(self.netG,", "= errD_fake + errD_fake_uncond # errD = errD_real + errD_wrong", "netG.parameters(): # if p.requires_grad: # G_opt_paras.append(p) optimizerG = optim.Adam(netG.parameters(), lr=cfg.TRAIN.GENERATOR_LR,", "= [] num_Ds = len(netsD) for i in range(num_Ds): opt", "encoder = Encoder().cuda() encoder.fine_tune(fine_tune_encoder) encoder_optimizer = torch.optim.Adam(params=filter(lambda p: p.requires_grad, encoder.parameters()),", "batch_size = imgs[0].size(0) noise.data.resize_(batch_size, nz) noise.data.normal_(0, 1) fake_img_list = []", "import 
torch.backends.cudnn as cudnn import torch import torch.nn as nn", "= '%s/super/%s/%s' %\\ (save_dir, split_dir, filenames[i]) folder = s_tmp[:s_tmp.rfind('/')] if", "* 255 real_img_set = real_img_set.astype(np.uint8) sup_real_img = summary.image('real_img', real_img_set) def", "save_superimages(self, images_list, filenames, save_dir, split_dir, imsize): batch_size = images_list[0].size(0) num_sentences", "= DecoderWithAttention(attention_dim=512, embed_dim=512, decoder_dim=512, vocab_size=len(word_map), dropout=0.5).cuda() decoder_optimizer = torch.optim.Adam(params=filter(lambda p:", "t_embeddings.size(1) batch_size = imgs[0].size(0) noise.data.resize_(batch_size, nz) noise.data.normal_(0, 1) fake_img_list =", "range(len(netsD)): netsD[i].apply(weights_init) netsD[i] = torch.nn.DataParallel(netsD[i], device_ids=gpus) # print(netsD[i]) print('# of", "for i in range(len(netsD)): netD = netsD[i] torch.save( netD.state_dict(), '%s/netD%d.pth'", "+ like_cov1 if flag == 0: sum_mu = summary.scalar('G_like_mu1', like_mu1.item())", "zip(model.parameters(), new_param): p.data.copy_(new_p) def copy_G_params(model): flatten = deepcopy(list(p.data for p", "sum(1 + log(sigma^2) - mu^2 - sigma^2) KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar)", "if m.bias is not None: m.bias.data.fill_(0.0) def load_params(model, new_param): for", "is changed to [0, 1] by function vutils.save_image real_img =", "Loss_D: %.2f Loss_G: %.2f Loss_KL: %.2f Time: %.2fs ''' #", "count) sum_cov = summary.scalar('G_like_cov2', like_cov2.item()) self.summary_writer.add_summary(sum_cov, count) if self.num_Ds >", "vutils.save_image( fake_img.data, '%s/count_%09d_fake_samples_%d.png' % (image_dir, count, i), normalize=True) fake_img_set =", "+ errD_fake else: errD = errD_real + 0.5 * (errD_wrong", "torch.bmm(img_hat, img_hat_transpose) covariance = covariance / num_pixels return mu, covariance", "vutils.save_image(super_img, savename, nrow=10, normalize=True) def save_singleimages(self, images, filenames, save_dir, split_dir,", "torch.nn.DataParallel(netG, device_ids=self.gpus) print(netG) # state_dict = torch.load(cfg.TRAIN.NET_G) state_dict = \\", "nz) noise.data.normal_(0, 1) fake_img_list = [] for i in range(embedding_dim):", "####################################################### # (*) Prediction and Inception score: ###################################################### pred =", "batch_size = images_list[0].size(0) num_sentences = len(images_list) for i in range(batch_size):", "* nn.MSELoss()(mu1, mu2) like_cov1 = cfg.TRAIN.COEFF.COLOR_LOSS * 5 * \\", "- sigma^2) KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar) KLD = torch.mean(KLD_element).mul_(-0.5) return KLD", "= [int(ix) for ix in s_gpus] self.num_gpus = len(self.gpus) torch.cuda.set_device(self.gpus[0])", "after forwarding the SAT model for idx in range(len(self.fake_imgs)): img", "count % 100 == 0: summary_D = summary.scalar('D_loss', errD_total.item()) summary_G", "D_NET64, D_NET128, D_NET256, D_NET512, D_NET1024, INCEPTION_V3 # ################## Shared functions", "= True self.batch_size = cfg.TRAIN.BATCH_SIZE * self.num_gpus self.max_epoch = cfg.TRAIN.MAX_EPOCH", "shared by data files normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229,", "netsD.append(D_NET512()) if cfg.TREE.BRANCH_NUM > 4: netsD.append(D_NET1024()) # TODO: if cfg.TREE.BRANCH_NUM", "D_NET1024, INCEPTION_V3 # ################## Shared functions ################### def compute_mean_covariance(img): batch_size", "device_ids=gpus) 
# print(netsD[i]) print('# of netsD', len(netsD)) count = 0", "errD_wrong = errD_wrong + errD_wrong_uncond errD_fake = errD_fake + errD_fake_uncond", "network: maximize log(D(G(z))) ###################################################### kl_loss, errG_total = self.train_Gnet(count) for p,", "range(num_imgs): fake_img = fake_imgs[i][0:num] # The range of fake_img.data (i.e.,", "self.num_gpus = len(self.gpus) torch.cuda.set_device(self.gpus[0]) cudnn.benchmark = True self.batch_size = cfg.TRAIN.BATCH_SIZE", "self.summary_writer.add_summary(summary_KL, count) count += 1 ####################################################### # (*) Save Images/Log/Model", "torch.nn.DataParallel(netG, device_ids=gpus) print(netG) netsD = [] if cfg.TREE.BRANCH_NUM > 0:", "deepcopy(list(p.data for p in model.parameters())) return flatten def compute_inception_score(predictions, num_splits=1):", "# if p.requires_grad: # G_opt_paras.append(p) optimizerG = optim.Adam(netG.parameters(), lr=cfg.TRAIN.GENERATOR_LR, betas=(0.5,", "classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) elif classname.find('Linear') != -1:", "* 255 real_img_set = real_img_set.astype(np.uint8) sup_real_img = summary.image('real_img', real_img_set) summary_writer.add_summary(sup_real_img,", "fake_img_set = vutils.make_grid(fake_img.data).cpu().numpy() fake_img_set = np.transpose(fake_img_set, (1, 2, 0)) fake_img_set", "count = int(count) + 1 if cfg.TRAIN.NET_D != '': for", "filenames, save_dir, split_dir, imsize): batch_size = images_list[0].size(0) num_sentences = len(images_list)", "criterion(outputs[0], real_labels) if len(outputs) > 1 and cfg.TRAIN.COEFF.UNCOND_LOSS > 0:", "inception_model = inception_model.cuda() inception_model.eval() return netG, netsD, len(netsD), inception_model, count", "= images_list[0].size(0) num_sentences = len(images_list) for i in range(batch_size): s_tmp", "count = cfg.TRAIN.NET_G[istart:iend] count = int(count) + 1 if cfg.TRAIN.NET_D", "(i + 1) * predictions.shape[0] // num_splits part = predictions[istart:iend,", "first, then back propagation errG_total += SATloss errG_total.backward() self.optimizerG.step() #######################################################", "istart = i * predictions.shape[0] // num_splits iend = (i", "summary.scalar('G_loss', errG_total.item()) summary_KL = summary.scalar('KL_loss', kl_loss.item()) self.summary_writer.add_summary(summary_D, count) self.summary_writer.add_summary(summary_G, count)", "= self.wrong_imgs[idx] fake_imgs = self.fake_imgs[idx] # netD.zero_grad() # Forward real_labels", "errD_total += errD ####################################################### # (3) Update G network: maximize", "self.summary_writer.add_summary(m_incep, count) # mean_nlpp, std_nlpp = negative_log_posterior_probability(predictions, 10) m_nlpp =", "print(t_embeddings[:, 0, :], t_embeddings.size(1)) embedding_dim = t_embeddings.size(1) batch_size = imgs[0].size(0)", "summary_D = summary.scalar('D_loss%d' % idx, errD.item()) self.summary_writer.add_summary(summary_D, count) return errD", "to evaluate mode netG.eval() for step, data in enumerate(self.data_loader, 0):", "torch.cuda.set_device(self.gpus[0]) cudnn.benchmark = True self.batch_size = cfg.TRAIN.BATCH_SIZE * self.num_gpus self.max_epoch", "img.size(1) height = img.size(2) width = img.size(3) num_pixels = height", "self.real_imgs[idx] wrong_imgs = self.wrong_imgs[idx] fake_imgs = self.fake_imgs[idx] # netD.zero_grad() #", "# G_opt_paras.append(p) optimizerG = optim.Adam(netG.parameters(), 
lr=cfg.TRAIN.GENERATOR_LR, betas=(0.5, 0.999)) return optimizerG,", "= summary.scalar('G_loss%d' % i, errG.item()) self.summary_writer.add_summary(summary_D, count) # Compute color", "# Forward real_labels = self.real_labels[:batch_size] fake_labels = self.fake_labels[:batch_size] # for", "self.fake_imgs[i][0:num]) # is still [-1. 1]... vutils.save_image( fake_img.data, '%s/count_%09d_fake_samples_%d.png' %", "the SAT loss after forwarding the SAT model for idx", "Image, ImageFont, ImageDraw from copy import deepcopy from miscc.config import", "'TRAIN', transform=transforms.Compose([normalize])), batch_size=self.batch_size, shuffle=True, num_workers=int(cfg.WORKERS), pin_memory=True) if cfg.CUDA: self.criterion.cuda() self.SATcriterion.cuda()", "evaluate mode netG.eval() for step, data in enumerate(self.data_loader, 0): imgs,", "= '%s/iteration%d' % (s_tmp, iteration) nz = cfg.GAN.Z_DIM noise =", "print(netG) # state_dict = torch.load(cfg.TRAIN.NET_G) state_dict = \\ torch.load(cfg.TRAIN.NET_G, map_location=lambda", "self.model_dir) self.summary_writer.close() def save_superimages(self, images_list, filenames, save_dir, split_dir, imsize): batch_size", "map_location=lambda storage, loc: storage) netG.load_state_dict(state_dict) print('Load ', cfg.TRAIN.NET_G) # the", "errD_real = errD_real + errD_real_uncond errD_wrong = errD_wrong + errD_wrong_uncond", "= errG_total + kl_loss # Postpone the backward propagation #", "torch.autograd import Variable import torch.optim as optim import torchvision.utils as", "images[i].add(1).div(2).mul(255).clamp(0, 255).byte() ndarr = img.permute(1, 2, 0).data.cpu().numpy() im = Image.fromarray(ndarr)", "fake_img_set = fake_img_set.astype(np.uint8) sup_fake_img = summary.image('fake_img%d' % i, fake_img_set) summary_writer.add_summary(sup_fake_img,", "= Variable(torch.FloatTensor(self.batch_size, nz)) fixed_noise = Variable(torch.FloatTensor(self.batch_size, nz).normal_(0, 1)) # Data", "filenames[i]) folder = s_tmp[:s_tmp.rfind('/')] if not os.path.isdir(folder): print('Make a new", "train_loader = torch.utils.data.DataLoader( CaptionDataset(data_folder, data_name, 'TRAIN', transform=transforms.Compose([normalize])), batch_size=self.batch_size, shuffle=True, num_workers=int(cfg.WORKERS),", "s_tmp.rfind('_') + 1 iend = s_tmp.rfind('.') iteration = int(s_tmp[istart:iend]) s_tmp", "errD = errD_real + 0.5 * (errD_wrong + errD_fake) #", "_, _ = netG(noise, t_embeddings[:, i, :]) if cfg.TEST.B_EXAMPLE: #", "if encoder_optimizer is not None: encoder_optimizer.step() ####################################################### # (*) Prediction", "print('mean:', mean, 'std', std) m_incep = summary.scalar('Inception_mean', mean) self.summary_writer.add_summary(m_incep, count)", "noise.cuda(), fixed_noise.cuda() predictions = [] count = start_count start_epoch =", "= cfg.TRAIN.COEFF.UNCOND_LOSS * \\ criterion(real_logits[1], real_labels) errD_wrong_uncond = cfg.TRAIN.COEFF.UNCOND_LOSS *", "split_dir, 64) # self.save_superimages(fake_img_list, filenames, # save_dir, split_dir, 128) self.save_superimages(fake_img_list,", "= 'CUB_5_cap_per_img_5_min_word_freq' # base name shared by data files normalize", "i in range(embedding_dim): fake_imgs, _, _ = netG(noise, t_embeddings[:, i,", "netD.zero_grad() # Forward real_labels = self.real_labels[:batch_size] fake_labels = self.fake_labels[:batch_size] #", "\\ nn.MSELoss()(covariance1, covariance2) errG_total = errG_total + like_mu1 + like_cov1", "# fake_img_list.append(fake_imgs[1].data.cpu()) 
fake_img_list.append(fake_imgs[2].data.cpu()) else: self.save_singleimages(fake_imgs[-1], filenames, save_dir, split_dir, i, 256)", "i)) netsD[i].load_state_dict(state_dict) inception_model = INCEPTION_V3() if cfg.CUDA: netG.cuda() for i", "1.0) elif classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) elif classname.find('Linear')", "real_img_set = real_img_set * 255 real_img_set = real_img_set.astype(np.uint8) sup_real_img =", "decoder_dim=512, vocab_size=len(word_map), dropout=0.5).cuda() decoder_optimizer = torch.optim.Adam(params=filter(lambda p: p.requires_grad, decoder.parameters()), lr=4e-4)", "of netsD', len(netsD)) count = 0 if cfg.TRAIN.NET_G != '':", "filenames, # save_dir, split_dir, i, 128) # self.save_singleimages(fake_imgs[-3], filenames, #", "self.inception_model(self.fake_imgs[-1].detach()) predictions.append(pred.data.cpu().numpy()) if count % 100 == 0: summary_D =", "fixed_noise = Variable(torch.FloatTensor(self.batch_size, nz).normal_(0, 1)) # Data parameters data_folder =", "[batch_size, 3, 256, 256] ####################################################### # (*) Forward fake images", "= data[1] data = data_1 ####################################################### # (0) Prepare training", "# load_params(self.netG, backup_para) # Compute inception score if len(predictions) >", "range(images.size(0)): s_tmp = '%s/single_samples/%s/%s' %\\ (save_dir, split_dir, filenames[i]) folder =", "real_vimgs.append(Variable(imgs[i]).cuda()) wrong_vimgs.append(Variable(w_imgs[i]).cuda()) else: real_vimgs.append(Variable(imgs[i])) wrong_vimgs.append(Variable(w_imgs[i])) return imgs, real_vimgs, wrong_vimgs, vembedding", "= cfg.TRAIN.BATCH_SIZE * self.num_gpus self.max_epoch = cfg.TRAIN.MAX_EPOCH self.snapshot_interval = cfg.TRAIN.SNAPSHOT_INTERVAL", "cfg.TRAIN.COEFF.COLOR_LOSS > 0: if self.num_Ds > 1: mu1, covariance1 =", "state_dict = torch.load(cfg.TRAIN.NET_G) state_dict = \\ torch.load(cfg.TRAIN.NET_G, map_location=lambda storage, loc:", "= summary.image('real_img', real_img_set) def save_img_results(imgs_tcpu, fake_imgs, num_imgs, count, image_dir, summary_writer):", "channel_num, num_pixels) # batch_size * num_pixels * channel_num img_hat_transpose =", "= img.size(0) channel_num = img.size(1) height = img.size(2) width =", "= noise.cuda(), fixed_noise.cuda() predictions = [] count = start_count start_epoch", "i in range(len(netsD)): netsD[i].cuda() inception_model = inception_model.cuda() inception_model.eval() return netG,", "flag = count % 100 batch_size = self.real_imgs[0].size(0) criterion, mu,", "like_cov1 = cfg.TRAIN.COEFF.COLOR_LOSS * 5 * \\ nn.MSELoss()(covariance1, covariance2) errG_total", "# save_dir, split_dir, 64) # self.save_superimages(fake_img_list, filenames, # save_dir, split_dir,", "(np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0))) kl = np.mean(np.sum(kl, 1)) scores.append(np.exp(kl))", "-0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2) KLD_element", "for epoch in range(start_epoch, self.max_epoch): start_t = time.time() # for", "# self.fake_imgs, _, _ = self.netG(fixed_noise, self.txt_embedding) save_img_results(self.imgs_tcpu, self.fake_imgs, self.num_Ds,", "# batch_size * channel_num * 1 * 1 mu =", "enumerate(zip(self.data_loader, train_loader), 0): data_1 = data[0] _, caps, caplens =", "1) fake_img_list = [] for i in range(embedding_dim): fake_imgs, _,", "numpy as np import os import time from PIL import", "netG.load_state_dict(state_dict) print('Load ', cfg.TRAIN.NET_G) istart = cfg.TRAIN.NET_G.rfind('_') + 1 
iend", "vembedding def train_Dnet(self, idx, count): flag = count % 100", "self.real_imgs, self.wrong_imgs, \\ self.txt_embedding = self.prepare_data(data) # Testing line for", "= Variable(t_embeddings).cuda() else: t_embeddings = Variable(t_embeddings) # print(t_embeddings[:, 0, :],", "# batch_size * num_pixels * channel_num img_hat_transpose = img_hat.transpose(1, 2)", "\\ criterion(fake_logits[1], fake_labels) # errD_real = errD_real + errD_real_uncond errD_wrong", "= self.real_imgs[0].size(0) criterion, mu, logvar = self.criterion, self.mu, self.logvar real_labels", "data in enumerate(zip(self.data_loader, train_loader), 0): data_1 = data[0] _, caps,", "G/Ds models.') def save_real(imgs_tcpu, image_dir): num = cfg.TRAIN.VIS_COUNT # The", "folder: ', folder) mkdir_p(folder) fullpath = '%s_%d_sentence%d.png' % (s_tmp, imsize,", "cfg.CUDA: real_vimgs.append(Variable(imgs[i]).cuda()) wrong_vimgs.append(Variable(w_imgs[i]).cuda()) else: real_vimgs.append(Variable(imgs[i])) wrong_vimgs.append(Variable(w_imgs[i])) return imgs, real_vimgs, wrong_vimgs,", "idx, errD.item()) self.summary_writer.add_summary(summary_D, count) return errD def train_Gnet(self, count): self.netG.zero_grad()", "errD_real_uncond = cfg.TRAIN.COEFF.UNCOND_LOSS * \\ criterion(real_logits[1], real_labels) errD_wrong_uncond = cfg.TRAIN.COEFF.UNCOND_LOSS", "D_NET512, D_NET1024, INCEPTION_V3 # ################## Shared functions ################### def compute_mean_covariance(img):", "kl = np.mean(np.sum(kl, 1)) scores.append(np.exp(kl)) return np.mean(scores), np.std(scores) def negative_log_posterior_probability(predictions,", "enumerate(self.data_loader, 0): imgs, t_embeddings, filenames = data if cfg.CUDA: t_embeddings", "summary_writer): num = cfg.TRAIN.VIS_COUNT # The range of real_img (i.e.,", "as cudnn import torch import torch.nn as nn from torch.autograd", "* np.log(np.max(part, 1)) result = np.mean(result) scores.append(result) return np.mean(scores), np.std(scores)", "data = data_1 ####################################################### # (0) Prepare training data ######################################################", "= (i + 1) * predictions.shape[0] // num_splits part =", "if cfg.TREE.BRANCH_NUM > 0: netsD.append(D_NET64()) if cfg.TREE.BRANCH_NUM > 1: netsD.append(D_NET128())", "data_name + '.json') with open(word_map_file, 'r') as j: word_map =", "img = images_list[j][i] # print(img.size()) img = img.view(1, 3, imsize,", "= noise.cuda() # switch to evaluate mode netG.eval() for step,", "if cfg.TRAIN.NET_G != '': state_dict = torch.load(cfg.TRAIN.NET_G) netG.load_state_dict(state_dict) print('Load ',", "def define_optimizers(netG, netsD): optimizersD = [] num_Ds = len(netsD) for", "netsD.append(D_NET64()) if cfg.TREE.BRANCH_NUM > 1: netsD.append(D_NET128()) if cfg.TREE.BRANCH_NUM > 2:", "128, 128] # self.fake_imgs[2].shape = [batch_size, 3, 256, 256] #######################################################", "self.save_singleimages(fake_imgs[-1], filenames, save_dir, split_dir, i, 256) # self.save_singleimages(fake_imgs[-2], filenames, #", "* channel_num * num_pixels img_hat = img - mu.expand_as(img) img_hat", "self.image_dir, self.summary_writer) # load_params(self.netG, backup_para) # Compute inception score if", "+ errG_patch errG_total = errG_total + errG if flag ==", "torch.load(cfg.TRAIN.NET_G) state_dict = \\ torch.load(cfg.TRAIN.NET_G, map_location=lambda storage, loc: storage) netG.load_state_dict(state_dict)", "Loss_G: %.2f Loss_KL: %.2f Time: %.2fs ''' # D(real): %.4f", 
"negative_log_posterior_probability(predictions, num_splits=1): # print('predictions', predictions.shape) scores = [] for i", "import torchvision.utils as vutils import numpy as np import os", "G_NET() netG.apply(weights_init) netG = torch.nn.DataParallel(netG, device_ids=self.gpus) print(netG) # state_dict =", "256, 256] ####################################################### # (*) Forward fake images to SAT", "# The range of fake_img.data (i.e., self.fake_imgs[i][0:num]) # is still", "def copy_G_params(model): flatten = deepcopy(list(p.data for p in model.parameters())) return", "if cfg.TRAIN.COEFF.COLOR_LOSS > 0: if self.num_Ds > 1: mu1, covariance1", "###################################################### errD_total = 0 for i in range(self.num_Ds): errD =", "m_incep = summary.scalar('Inception_mean', mean) self.summary_writer.add_summary(m_incep, count) # mean_nlpp, std_nlpp =", "flag == 0: sum_mu = summary.scalar('G_like_mu1', like_mu1.item()) self.summary_writer.add_summary(sum_mu, count) sum_cov", "print('Make a new folder: ', folder) mkdir_p(folder) fullpath = '%s_%d_sentence%d.png'", "height = img.size(2) width = img.size(3) num_pixels = height *", "print('Load ', cfg.TRAIN.NET_G) # the path to save generated images", "import torch.optim as optim import torchvision.utils as vutils import numpy", "nz).normal_(0, 1)) # Data parameters data_folder = 'birds_output' # folder", "img.size(0) channel_num = img.size(1) height = img.size(2) width = img.size(3)", "= '%s_%d.png' % (s_tmp, imsize) super_img = [] for j", "= optim.Adam(netsD[i].parameters(), lr=cfg.TRAIN.DISCRIMINATOR_LR, betas=(0.5, 0.999)) optimizersD.append(opt) # G_opt_paras = []", "loss after forwarding the SAT model for idx in range(len(self.fake_imgs)):", "data files normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])", "fake_imgs, _, _ = netG(noise, t_embeddings[:, i, :]) if cfg.TEST.B_EXAMPLE:", "= caps_sorted[:, 1:] scores, _ = pack_padded_sequence(scores, decode_lengths, batch_first=True).cuda() targets,", "Save images backup_para = copy_G_params(self.netG) load_params(self.netG, avg_param_G) # self.fake_imgs, _,", "# class condGANTrainer(object): def __init__(self, output_dir, data_loader, imsize): if cfg.TRAIN.FLAG:", "wrong_vimgs.append(Variable(w_imgs[i])) return imgs, real_vimgs, wrong_vimgs, vembedding def train_Dnet(self, idx, count):", "encoder(self.fake_imgs[idx]) scores, caps_sorted, decode_lengths, alphas, sort_ind = decoder(img, caps, caplens)", "else: errD = errD_real + 0.5 * (errD_wrong + errD_fake)", "###################################################### # Update weights decoder_optimizer.step() if encoder_optimizer is not None:", "errD def train_Gnet(self, count): self.netG.zero_grad() errG_total = 0 flag =", "errG_total = errG_total + kl_loss # Postpone the backward propagation", "netG.eval() for step, data in enumerate(self.data_loader, 0): imgs, t_embeddings, filenames", "% i, errG.item()) self.summary_writer.add_summary(summary_D, count) # Compute color consistency losses", "self.imgs_tcpu, self.real_imgs, self.wrong_imgs, \\ self.txt_embedding = self.prepare_data(data) # Testing line", "else None SATloss = 0 # Compute the SAT loss", "'std', std) m_incep = summary.scalar('Inception_mean', mean) self.summary_writer.add_summary(m_incep, count) # mean_nlpp,", "self.num_Ds, count, self.image_dir, self.summary_writer) # load_params(self.netG, backup_para) # Compute inception", "(errD_wrong + errD_fake) # backward errD.backward() # update parameters optD.step()", "0: 
errG_patch = cfg.TRAIN.COEFF.UNCOND_LOSS *\\ criterion(outputs[1], real_labels) errG = errG", "= 'birds_output' # folder with data files saved by create_input_files.py", "count, self.model_dir) # Save images backup_para = copy_G_params(self.netG) load_params(self.netG, avg_param_G)", "torch.optim.Adam(params=filter(lambda p: p.requires_grad, decoder.parameters()), lr=4e-4) encoder = Encoder().cuda() encoder.fine_tune(fine_tune_encoder) encoder_optimizer", "for p, new_p in zip(model.parameters(), new_param): p.data.copy_(new_p) def copy_G_params(model): flatten", "print_function from six.moves import range import torchvision.transforms as transforms import", "errD_total.item()) summary_G = summary.scalar('G_loss', errG_total.item()) summary_KL = summary.scalar('KL_loss', kl_loss.item()) self.summary_writer.add_summary(summary_D,", "decoder_optimizer = torch.optim.Adam(params=filter(lambda p: p.requires_grad, decoder.parameters()), lr=4e-4) encoder = Encoder().cuda()", "caps, caplens) targets = caps_sorted[:, 1:] scores, _ = pack_padded_sequence(scores,", "pred = self.inception_model(self.fake_imgs[-1].detach()) predictions.append(pred.data.cpu().numpy()) if count % 100 == 0:", "pin_memory=True) if cfg.CUDA: self.criterion.cuda() self.SATcriterion.cuda() # Compute SATloss self.real_labels =", "and cfg.TRAIN.COEFF.UNCOND_LOSS > 0: errD_real_uncond = cfg.TRAIN.COEFF.UNCOND_LOSS * \\ criterion(real_logits[1],", "self.fake_labels.cuda() self.gradient_one = self.gradient_one.cuda() self.gradient_half = self.gradient_half.cuda() noise, fixed_noise =", "np.transpose(fake_img_set, (1, 2, 0)) fake_img_set = (fake_img_set + 1) *", "i, errG.item()) self.summary_writer.add_summary(summary_D, count) # Compute color consistency losses if", "+ like_mu1 + like_cov1 if flag == 0: sum_mu =", "netsD, len(netsD), inception_model, count def define_optimizers(netG, netsD): optimizersD = []", "mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar) KLD = torch.mean(KLD_element).mul_(-0.5) return KLD def weights_init(m): classname =", "in enumerate(self.data_loader, 0): for step, data in enumerate(zip(self.data_loader, train_loader), 0):", "= img_hat.transpose(1, 2) # batch_size * channel_num * channel_num covariance", "* channel_num covariance = torch.bmm(img_hat, img_hat_transpose) covariance = covariance /", "switch to evaluate mode netG.eval() for step, data in enumerate(self.data_loader,", "- mu^2 - sigma^2) KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar) KLD = torch.mean(KLD_element).mul_(-0.5)", "self.real_labels[:batch_size] for i in range(self.num_Ds): outputs = self.netsD[i](self.fake_imgs[i], mu) errG", "'birds_output' # folder with data files saved by create_input_files.py data_name", "errD_real + 0.5 * (errD_wrong + errD_fake) # backward errD.backward()", "mu2) like_cov1 = cfg.TRAIN.COEFF.COLOR_LOSS * 5 * \\ nn.MSELoss()(covariance1, covariance2)", "in range(len(netsD)): netD = netsD[i] torch.save( netD.state_dict(), '%s/netD%d.pth' % (model_dir,", "\\ criterion(real_logits[1], real_labels) errD_wrong_uncond = cfg.TRAIN.COEFF.UNCOND_LOSS * \\ criterion(wrong_logits[1], real_labels)", "= KL_loss(mu, logvar) * cfg.TRAIN.COEFF.KL errG_total = errG_total + kl_loss", "self.fake_labels = self.fake_labels.cuda() self.gradient_one = self.gradient_one.cuda() self.gradient_half = self.gradient_half.cuda() noise,", "mu = self.criterion, self.mu netD, optD = self.netsD[idx], self.optimizersD[idx] real_imgs", "for ix in s_gpus] self.num_gpus = len(self.gpus) 
torch.cuda.set_device(self.gpus[0]) cudnn.benchmark =", "= os.path.join(output_dir, 'Log') mkdir_p(self.model_dir) mkdir_p(self.image_dir) mkdir_p(self.log_dir) self.summary_writer = FileWriter(self.log_dir) s_gpus", "like_mu2 + like_cov2 if flag == 0: sum_mu = summary.scalar('G_like_mu2',", "# Read word map word_map_file = os.path.join(data_folder, 'WORDMAP_' + data_name", "tensorboard import FileWriter from model import G_NET, D_NET64, D_NET128, D_NET256,", "else: t_embeddings = Variable(t_embeddings) # print(t_embeddings[:, 0, :], t_embeddings.size(1)) embedding_dim", "print ('Checking real samples at first...') save_real(self.imgs_tcpu, self.image_dir) ####################################################### #", "nn.MSELoss()(covariance1, covariance2) errG_total = errG_total + like_mu2 + like_cov2 if", "data_1 = data[0] _, caps, caplens = data[1] data =", "inception_model = INCEPTION_V3() if cfg.CUDA: netG.cuda() for i in range(len(netsD)):", "// (self.num_batches) for epoch in range(start_epoch, self.max_epoch): start_t = time.time()", "Encoder, DecoderWithAttention from torch.nn.utils.rnn import pack_padded_sequence fine_tune_encoder = False #", "errD_fake = errD_fake + errD_fake_uncond # errD = errD_real +", "'%s_%d.png' % (s_tmp, imsize) super_img = [] for j in", "= [batch_size, 3, 256, 256] ####################################################### # (*) Forward fake", "count) count += 1 ####################################################### # (*) Save Images/Log/Model per", "# G_opt_paras = [] # for p in netG.parameters(): #", "'%s/netD%d.pth' % (model_dir, i)) print('Save G/Ds models.') def save_real(imgs_tcpu, image_dir):", "compute_inception_score(predictions, num_splits=1): # print('predictions', predictions.shape) scores = [] for i", "count, self.model_dir) self.summary_writer.close() def save_superimages(self, images_list, filenames, save_dir, split_dir, imsize):", "as optim import torchvision.utils as vutils import numpy as np", "cfg.TRAIN.COEFF.UNCOND_LOSS *\\ criterion(outputs[1], real_labels) errG = errG + errG_patch errG_total", "not None: encoder_optimizer.zero_grad() ####################################################### # (2) Update D network ######################################################", "'': state_dict = torch.load(cfg.TRAIN.NET_G) netG.load_state_dict(state_dict) print('Load ', cfg.TRAIN.NET_G) istart =", "color consistency losses if cfg.TRAIN.COEFF.COLOR_LOSS > 0: if self.num_Ds >", "fake_imgs, num_imgs, count, image_dir, summary_writer): num = cfg.TRAIN.VIS_COUNT # The", "* predictions.shape[0] // num_splits part = predictions[istart:iend, :] result =", "1 and cfg.TRAIN.COEFF.UNCOND_LOSS > 0: errG_patch = cfg.TRAIN.COEFF.UNCOND_LOSS *\\ criterion(outputs[1],", "errG_total = 0 flag = count % 100 batch_size =", "num_splits=1): # print('predictions', predictions.shape) scores = [] for i in", "+ 1) * 255 / 2 fake_img_set = fake_img_set.astype(np.uint8) sup_fake_img", "0 if cfg.TRAIN.NET_G != '': state_dict = torch.load(cfg.TRAIN.NET_G) netG.load_state_dict(state_dict) print('Load", "####################################################### # (*) Forward fake images to SAT ###################################################### from", "Variable(torch.FloatTensor(self.batch_size, nz).normal_(0, 1)) # Data parameters data_folder = 'birds_output' #", "data_folder = 'birds_output' # folder with data files saved by", "samples if epoch == start_epoch and step == 0: print", "in range(batch_size): s_tmp = '%s/super/%s/%s' %\\ (save_dir, split_dir, filenames[i]) 
folder", "D_NET128, D_NET256, D_NET512, D_NET1024, INCEPTION_V3 # ################## Shared functions ###################", "== 0: sum_mu = summary.scalar('G_like_mu2', like_mu2.item()) self.summary_writer.add_summary(sum_mu, count) sum_cov =", "= self.gradient_one.cuda() self.gradient_half = self.gradient_half.cuda() noise, fixed_noise = noise.cuda(), fixed_noise.cuda()", "and step == 0: print ('Checking real samples at first...')", "10) # print('mean:', mean, 'std', std) m_incep = summary.scalar('Inception_mean', mean)", "for i in range(len(netsD)): print('Load %s_%d.pth' % (cfg.TRAIN.NET_D, i)) state_dict", "= summary.scalar('G_like_cov2', like_cov2.item()) self.summary_writer.add_summary(sum_cov, count) if self.num_Ds > 2: mu1,", "# (*) Prediction and Inception score: ###################################################### pred = self.inception_model(self.fake_imgs[-1].detach())", "range(start_epoch, self.max_epoch): start_t = time.time() # for step, data in", "caps, caplens = data[1] data = data_1 ####################################################### # (0)", "data_loader, imsize): if cfg.TRAIN.FLAG: self.model_dir = os.path.join(output_dir, 'Model') self.image_dir =", "netG.load_state_dict(state_dict) print('Load ', cfg.TRAIN.NET_G) # the path to save generated", "samples at first...') save_real(self.imgs_tcpu, self.image_dir) ####################################################### # (1) Generate fake", "self.criterion.cuda() self.SATcriterion.cuda() # Compute SATloss self.real_labels = self.real_labels.cuda() self.fake_labels =", "state_dict = \\ torch.load(cfg.TRAIN.NET_G, map_location=lambda storage, loc: storage) netG.load_state_dict(state_dict) print('Load", "real_img_set = real_img_set.astype(np.uint8) sup_real_img = summary.image('real_img', real_img_set) def save_img_results(imgs_tcpu, fake_imgs,", "os import time from PIL import Image, ImageFont, ImageDraw from", "+= self.SATcriterion(scores, targets) + 1 * ((1. 
- alphas.sum(dim=1)) **", "mu = img.mean(2, keepdim=True).mean(3, keepdim=True) # batch_size * channel_num *", "fake_logits = netD(fake_imgs.detach(), mu.detach()) # errD_real = criterion(real_logits[0], real_labels) errD_wrong", "vembedding = Variable(t_embedding) for i in range(self.num_Ds): if cfg.CUDA: real_vimgs.append(Variable(imgs[i]).cuda())", "self.num_Ds > 2: mu1, covariance1 = compute_mean_covariance(self.fake_imgs[-2]) mu2, covariance2 =", "0.224, 0.225]) # Show, Attend, and Tell Dataloader train_loader =", "netsD[i] = torch.nn.DataParallel(netsD[i], device_ids=gpus) # print(netsD[i]) print('# of netsD', len(netsD))", "errG_total + like_mu1 + like_cov1 if flag == 0: sum_mu", "criterion(fake_logits[0], fake_labels) if len(real_logits) > 1 and cfg.TRAIN.COEFF.UNCOND_LOSS > 0:", "_, caps, caplens = data[1] data = data_1 ####################################################### #", "channel_num * channel_num covariance = torch.bmm(img_hat, img_hat_transpose) covariance = covariance", "in range(self.num_Ds): if cfg.CUDA: real_vimgs.append(Variable(imgs[i]).cuda()) wrong_vimgs.append(Variable(w_imgs[i]).cuda()) else: real_vimgs.append(Variable(imgs[i])) wrong_vimgs.append(Variable(w_imgs[i])) return", "word map word_map_file = os.path.join(data_folder, 'WORDMAP_' + data_name + '.json')", "count += 1 ####################################################### # (*) Save Images/Log/Model per SNAPSHOT_INTERVAL:", "count) if self.num_Ds > 2: mu1, covariance1 = compute_mean_covariance(self.fake_imgs[-2]) mu2,", "* \\ nn.MSELoss()(covariance1, covariance2) errG_total = errG_total + like_mu1 +", "to [0, 1] by function vutils.save_image real_img = imgs_tcpu[-1][0:num] vutils.save_image(", "for step, data in enumerate(zip(self.data_loader, train_loader), 0): data_1 = data[0]", "real_labels = self.real_labels[:batch_size] fake_labels = self.fake_labels[:batch_size] # for real real_logits", "from torch.autograd import Variable import torch.optim as optim import torchvision.utils", "# update parameters optD.step() # log if flag == 0:", "torch.save( netG.state_dict(), '%s/netG_%d.pth' % (model_dir, epoch)) for i in range(len(netsD)):", "= errG_total + like_mu2 + like_cov2 if flag == 0:", "data ###################################################### self.imgs_tcpu, self.real_imgs, self.wrong_imgs, \\ self.txt_embedding = self.prepare_data(data) #", "if count % 100 == 0: summary_D = summary.scalar('D_loss', errD_total.item())", "len(netsD) for i in range(num_Ds): opt = optim.Adam(netsD[i].parameters(), lr=cfg.TRAIN.DISCRIMINATOR_LR, betas=(0.5,", "torch.FloatTensor([0.5]) nz = cfg.GAN.Z_DIM noise = Variable(torch.FloatTensor(self.batch_size, nz)) fixed_noise =", "range(self.num_Ds): if cfg.CUDA: real_vimgs.append(Variable(imgs[i]).cuda()) wrong_vimgs.append(Variable(w_imgs[i]).cuda()) else: real_vimgs.append(Variable(imgs[i])) wrong_vimgs.append(Variable(w_imgs[i])) return imgs,", "> 0: errG_patch = cfg.TRAIN.COEFF.UNCOND_LOSS *\\ criterion(outputs[1], real_labels) errG =", "= FileWriter(self.log_dir) s_gpus = cfg.GPU_ID.split(',') self.gpus = [int(ix) for ix", "netsD, epoch, model_dir): load_params(netG, avg_param_G) torch.save( netG.state_dict(), '%s/netG_%d.pth' % (model_dir,", "= cfg.GAN.Z_DIM noise = Variable(torch.FloatTensor(self.batch_size, nz)) fixed_noise = Variable(torch.FloatTensor(self.batch_size, nz).normal_(0,", "Testing line for real samples if epoch == start_epoch and", "s_tmp = '%s/single_samples/%s/%s' %\\ (save_dir, split_dir, filenames[i]) folder = s_tmp[:s_tmp.rfind('/')]", "imsize) 
super_img = [] for j in range(num_sentences): img =", "self.optimizersD[idx] real_imgs = self.real_imgs[idx] wrong_imgs = self.wrong_imgs[idx] fake_imgs = self.fake_imgs[idx]", "with data files saved by create_input_files.py data_name = 'CUB_5_cap_per_img_5_min_word_freq' #", "= len(self.gpus) torch.cuda.set_device(self.gpus[0]) cudnn.benchmark = True self.batch_size = cfg.TRAIN.BATCH_SIZE *", "nn.BCELoss() self.SATcriterion = nn.CrossEntropyLoss() self.real_labels = Variable(torch.FloatTensor(self.batch_size).fill_(1)) self.fake_labels = Variable(torch.FloatTensor(self.batch_size).fill_(0))", "backward propagation # errG_total.backward() # self.optimizerG.step() return kl_loss, errG_total def", "= errD_wrong + errD_wrong_uncond errD_fake = errD_fake + errD_fake_uncond #", "self.snapshot_interval = cfg.TRAIN.SNAPSHOT_INTERVAL self.data_loader = data_loader self.num_batches = len(self.data_loader) def", "- alphas.sum(dim=1)) ** 2).mean() # Set zero_grad for encoder/decoder decoder_optimizer.zero_grad()", "scores = [] for i in range(num_splits): istart = i", "np.mean(np.sum(kl, 1)) scores.append(np.exp(kl)) return np.mean(scores), np.std(scores) def negative_log_posterior_probability(predictions, num_splits=1): #", "epoch, model_dir): load_params(netG, avg_param_G) torch.save( netG.state_dict(), '%s/netG_%d.pth' % (model_dir, epoch))", "= s_tmp[:s_tmp.rfind('/')] save_dir = '%s/iteration%d' % (s_tmp, iteration) nz =", "m.bias.data.fill_(0.0) def load_params(model, new_param): for p, new_p in zip(model.parameters(), new_param):", "# Update weights decoder_optimizer.step() if encoder_optimizer is not None: encoder_optimizer.step()", "= images_list[j][i] # print(img.size()) img = img.view(1, 3, imsize, imsize)", "self.wrong_imgs, \\ self.txt_embedding = self.prepare_data(data) # Testing line for real", "print(netsD[i]) print('# of netsD', len(netsD)) count = 0 if cfg.TRAIN.NET_G", "# Testing line for real samples if epoch == start_epoch", "new folder: ', folder) mkdir_p(folder) # savename = '%s_%d.png' %", "mu2, covariance2 = \\ compute_mean_covariance(self.fake_imgs[-2].detach()) like_mu2 = cfg.TRAIN.COEFF.COLOR_LOSS * nn.MSELoss()(mu1,", "self.model_dir) # Save images backup_para = copy_G_params(self.netG) load_params(self.netG, avg_param_G) #", "in range(len(netsD)): netsD[i].apply(weights_init) netsD[i] = torch.nn.DataParallel(netsD[i], device_ids=gpus) # print(netsD[i]) print('#", "self.fake_imgs, self.num_Ds, count, self.image_dir, self.summary_writer) # load_params(self.netG, backup_para) # Compute", "filenames, # save_dir, split_dir, 128) self.save_superimages(fake_img_list, filenames, save_dir, split_dir, 256)", "epoch == start_epoch and step == 0: print ('Checking real", "for p in model.parameters())) return flatten def compute_inception_score(predictions, num_splits=1): #", "# self.fake_imgs[2].shape = [batch_size, 3, 256, 256] ####################################################### # (*)", "for p, avg_p in zip(self.netG.parameters(), avg_param_G): avg_p.mul_(0.999).add_(0.001, p.data) # Combine", "for i in range(batch_size): s_tmp = '%s/super/%s/%s' %\\ (save_dir, split_dir,", "###################################################### noise.data.normal_(0, 1) self.fake_imgs, self.mu, self.logvar = \\ self.netG(noise, self.txt_embedding)", "self.optimizerG.step() return kl_loss, errG_total def train(self): self.netG, self.netsD, self.num_Ds,\\ self.inception_model,", "s_tmp[:s_tmp.rfind('/')] save_dir = '%s/iteration%d' % (s_tmp, iteration) nz = cfg.GAN.Z_DIM", "[-1, 1] to [0, 255] img 
= images[i].add(1).div(2).mul(255).clamp(0, 255).byte() ndarr", "def train_Gnet(self, count): self.netG.zero_grad() errG_total = 0 flag = count", "data_name = 'CUB_5_cap_per_img_5_min_word_freq' # base name shared by data files", "i), normalize=True) fake_img_set = vutils.make_grid(fake_img.data).cpu().numpy() fake_img_set = np.transpose(fake_img_set, (1, 2,", "Variable(t_embedding).cuda() else: vembedding = Variable(t_embedding) for i in range(self.num_Ds): if", "###################################################### self.imgs_tcpu, self.real_imgs, self.wrong_imgs, \\ self.txt_embedding = self.prepare_data(data) # Testing", "if len(real_logits) > 1 and cfg.TRAIN.COEFF.UNCOND_LOSS > 0: errD_real_uncond =", "0.02) m.bias.data.fill_(0) elif classname.find('Linear') != -1: nn.init.orthogonal(m.weight.data, 1.0) if m.bias", "= -1. * np.log(np.max(part, 1)) result = np.mean(result) scores.append(result) return", "> 5: for i in range(len(netsD)): netsD[i].apply(weights_init) netsD[i] = torch.nn.DataParallel(netsD[i],", "cfg.CUDA: t_embeddings = Variable(t_embeddings).cuda() else: t_embeddings = Variable(t_embeddings) # print(t_embeddings[:,", "images s_tmp = cfg.TRAIN.NET_G istart = s_tmp.rfind('_') + 1 iend", "torch.optim as optim import torchvision.utils as vutils import numpy as", "sup_fake_img = summary.image('fake_img%d' % i, fake_img_set) summary_writer.add_summary(sup_fake_img, count) summary_writer.flush() #", "return mu, covariance def KL_loss(mu, logvar): # -0.5 * sum(1", "from tensorboard import summary from tensorboard import FileWriter from model", "fake_labels) if len(real_logits) > 1 and cfg.TRAIN.COEFF.UNCOND_LOSS > 0: errD_real_uncond", "targets) + 1 * ((1. - alphas.sum(dim=1)) ** 2).mean() #", "if cfg.CUDA: real_vimgs.append(Variable(imgs[i]).cuda()) wrong_vimgs.append(Variable(w_imgs[i]).cuda()) else: real_vimgs.append(Variable(imgs[i])) wrong_vimgs.append(Variable(w_imgs[i])) return imgs, real_vimgs,", "num_pixels) # batch_size * num_pixels * channel_num img_hat_transpose = img_hat.transpose(1,", "True self.batch_size = cfg.TRAIN.BATCH_SIZE * self.num_gpus self.max_epoch = cfg.TRAIN.MAX_EPOCH self.snapshot_interval", "state_dict = torch.load('%s%d.pth' % (cfg.TRAIN.NET_D, i)) netsD[i].load_state_dict(state_dict) inception_model = INCEPTION_V3()", "SATloss += self.SATcriterion(scores, targets) + 1 * ((1. 
- alphas.sum(dim=1))", "word_map = json.load(j) # Define the encoder/decoder structure for SAT", "open(word_map_file, 'r') as j: word_map = json.load(j) # Define the", "vutils.save_image( real_img, '%s/real_samples.png' % (image_dir), normalize=True) real_img_set = vutils.make_grid(real_img).numpy() real_img_set", "log(sigma^2) - mu^2 - sigma^2) KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar) KLD =", "new_p in zip(model.parameters(), new_param): p.data.copy_(new_p) def copy_G_params(model): flatten = deepcopy(list(p.data", "', cfg.TRAIN.NET_G) istart = cfg.TRAIN.NET_G.rfind('_') + 1 iend = cfg.TRAIN.NET_G.rfind('.')", "structure for SAT model decoder = DecoderWithAttention(attention_dim=512, embed_dim=512, decoder_dim=512, vocab_size=len(word_map),", "fixed_noise = noise.cuda(), fixed_noise.cuda() predictions = [] count = start_count", "for i in range(num_imgs): fake_img = fake_imgs[i][0:num] # The range", "netsD', len(netsD)) count = 0 if cfg.TRAIN.NET_G != '': state_dict", "= self.real_labels.cuda() self.fake_labels = self.fake_labels.cuda() self.gradient_one = self.gradient_one.cuda() self.gradient_half =", "summary.scalar('KL_loss', kl_loss.item()) self.summary_writer.add_summary(summary_D, count) self.summary_writer.add_summary(summary_G, count) self.summary_writer.add_summary(summary_KL, count) count +=", "= np.transpose(fake_img_set, (1, 2, 0)) fake_img_set = (fake_img_set + 1)", "= height * width # batch_size * channel_num * 1", "save_model(self.netG, avg_param_G, self.netsD, count, self.model_dir) # Save images backup_para =", "cfg.TREE.BRANCH_NUM > 1: netsD.append(D_NET128()) if cfg.TREE.BRANCH_NUM > 2: netsD.append(D_NET256()) if", "errD_real = criterion(real_logits[0], real_labels) errD_wrong = criterion(wrong_logits[0], fake_labels) errD_fake =", "Variable(t_embeddings).cuda() else: t_embeddings = Variable(t_embeddings) # print(t_embeddings[:, 0, :], t_embeddings.size(1))", "(*) Update SAT network: ###################################################### # Update weights decoder_optimizer.step() if", "\\ compute_mean_covariance(self.fake_imgs[-2].detach()) like_mu2 = cfg.TRAIN.COEFF.COLOR_LOSS * nn.MSELoss()(mu1, mu2) like_cov2 =", "self.summary_writer.add_summary(summary_D, count) # Compute color consistency losses if cfg.TRAIN.COEFF.COLOR_LOSS >", "1:] scores, _ = pack_padded_sequence(scores, decode_lengths, batch_first=True).cuda() targets, _ =", "<reponame>mazzaAnt/StackGAN-v2<gh_stars>1-10 from __future__ import print_function from six.moves import range import", "[0, 255] img = images[i].add(1).div(2).mul(255).clamp(0, 255).byte() ndarr = img.permute(1, 2,", "from PIL import Image, ImageFont, ImageDraw from copy import deepcopy", "Update D network ###################################################### errD_total = 0 for i in", "from miscc.config import cfg from miscc.utils import mkdir_p from CaptionDatasets", "not os.path.isdir(folder): print('Make a new folder: ', folder) mkdir_p(folder) #", "at first...') save_real(self.imgs_tcpu, self.image_dir) ####################################################### # (1) Generate fake images", "= optim.Adam(netG.parameters(), lr=cfg.TRAIN.GENERATOR_LR, betas=(0.5, 0.999)) return optimizerG, optimizersD def save_model(netG,", "result = np.mean(result) scores.append(result) return np.mean(scores), np.std(scores) def load_network(gpus): netG", "summary.image('real_img', real_img_set) def save_img_results(imgs_tcpu, fake_imgs, num_imgs, count, image_dir, summary_writer): num", "= self.train_Gnet(count) for p, 
avg_p in zip(self.netG.parameters(), avg_param_G): avg_p.mul_(0.999).add_(0.001, p.data)", "i in range(self.num_Ds): if cfg.CUDA: real_vimgs.append(Variable(imgs[i]).cuda()) wrong_vimgs.append(Variable(w_imgs[i]).cuda()) else: real_vimgs.append(Variable(imgs[i])) wrong_vimgs.append(Variable(w_imgs[i]))", "= i * predictions.shape[0] // num_splits iend = (i +", "[] # for p in netG.parameters(): # if p.requires_grad: #", "* 255 / 2 fake_img_set = fake_img_set.astype(np.uint8) sup_fake_img = summary.image('fake_img%d'", "errD_real + errD_real_uncond errD_wrong = errD_wrong + errD_wrong_uncond errD_fake =", "'': for i in range(len(netsD)): print('Load %s_%d.pth' % (cfg.TRAIN.NET_D, i))", "+ 0.5 * (errD_wrong + errD_fake) # backward errD.backward() #", "# Combine with G and SAT first, then back propagation", "as vutils import numpy as np import os import time", "print('Error: the path for morels is not found!') else: #", "Forward real_labels = self.real_labels[:batch_size] fake_labels = self.fake_labels[:batch_size] # for real", "!= '': for i in range(len(netsD)): print('Load %s_%d.pth' % (cfg.TRAIN.NET_D,", "the SAT model for idx in range(len(self.fake_imgs)): img = encoder(self.fake_imgs[idx])", "create_input_files.py data_name = 'CUB_5_cap_per_img_5_min_word_freq' # base name shared by data", "save_img_results(self.imgs_tcpu, self.fake_imgs, self.num_Ds, count, self.image_dir, self.summary_writer) # load_params(self.netG, backup_para) #", "print(netG) netsD = [] if cfg.TREE.BRANCH_NUM > 0: netsD.append(D_NET64()) if", "self.gradient_half.cuda() noise, fixed_noise = noise.cuda(), fixed_noise.cuda() predictions = [] count", "i in range(batch_size): s_tmp = '%s/super/%s/%s' %\\ (save_dir, split_dir, filenames[i])", "copy_G_params(model): flatten = deepcopy(list(p.data for p in model.parameters())) return flatten", "= load_network(self.gpus) avg_param_G = copy_G_params(self.netG) self.optimizerG, self.optimizersD = \\ define_optimizers(self.netG,", "0) vutils.save_image(super_img, savename, nrow=10, normalize=True) def save_singleimages(self, images, filenames, save_dir,", "num_imgs, count, image_dir, summary_writer): num = cfg.TRAIN.VIS_COUNT # The range", "predictions.append(pred.data.cpu().numpy()) if count % 100 == 0: summary_D = summary.scalar('D_loss',", "img_hat = img_hat.view(batch_size, channel_num, num_pixels) # batch_size * num_pixels *", "= criterion(wrong_logits[0], fake_labels) errD_fake = criterion(fake_logits[0], fake_labels) if len(real_logits) >", "self.netG(noise, self.txt_embedding) # len(self.fake_imgs) = NUM_BRANCHES # self.fake_imgs[0].shape = [batch_size,", "real_img_set = real_img_set.astype(np.uint8) sup_real_img = summary.image('real_img', real_img_set) summary_writer.add_summary(sup_real_img, count) for", "# for step, data in enumerate(self.data_loader, 0): for step, data", "not None: encoder_optimizer.step() ####################################################### # (*) Prediction and Inception score:", "and Tell Dataloader train_loader = torch.utils.data.DataLoader( CaptionDataset(data_folder, data_name, 'TRAIN', transform=transforms.Compose([normalize])),", "print('Save G/Ds models.') def save_real(imgs_tcpu, image_dir): num = cfg.TRAIN.VIS_COUNT #", "self.prepare_data(data) # Testing line for real samples if epoch ==", "SATloss = 0 # Compute the SAT loss after forwarding", "= [] end_t = time.time() print('''[%d/%d][%d] Loss_D: %.2f Loss_G: %.2f", "> 2: netsD.append(D_NET256()) if cfg.TREE.BRANCH_NUM > 3: netsD.append(D_NET512()) if cfg.TREE.BRANCH_NUM", "data if 
cfg.CUDA: t_embeddings = Variable(t_embeddings).cuda() else: t_embeddings = Variable(t_embeddings)", "cfg.TRAIN.MAX_EPOCH self.snapshot_interval = cfg.TRAIN.SNAPSHOT_INTERVAL self.data_loader = data_loader self.num_batches = len(self.data_loader)", "FileWriter from model import G_NET, D_NET64, D_NET128, D_NET256, D_NET512, D_NET1024,", "errG_total + errG if flag == 0: summary_D = summary.scalar('G_loss%d'", "split_dir, i, 64) # break if cfg.TEST.B_EXAMPLE: # self.save_superimages(fake_img_list, filenames,", "= summary.scalar('D_loss', errD_total.item()) summary_G = summary.scalar('G_loss', errG_total.item()) summary_KL = summary.scalar('KL_loss',", "(fake_img_set + 1) * 255 / 2 fake_img_set = fake_img_set.astype(np.uint8)", "self.image_dir) ####################################################### # (1) Generate fake images ###################################################### noise.data.normal_(0, 1)", "= self.netsD[i](self.fake_imgs[i], mu) errG = criterion(outputs[0], real_labels) if len(outputs) >", "generated images s_tmp = cfg.TRAIN.NET_G istart = s_tmp.rfind('_') + 1", "if self.num_Ds > 2: mu1, covariance1 = compute_mean_covariance(self.fake_imgs[-2]) mu2, covariance2", "if split_dir == 'test': split_dir = 'valid' netG = G_NET()", "data_loader self.num_batches = len(self.data_loader) def prepare_data(self, data): imgs, w_imgs, t_embedding,", "self.num_batches = len(self.data_loader) def prepare_data(self, data): imgs, w_imgs, t_embedding, _", "noise.data.resize_(batch_size, nz) noise.data.normal_(0, 1) fake_img_list = [] for i in", "i, 64) # break if cfg.TEST.B_EXAMPLE: # self.save_superimages(fake_img_list, filenames, #", "Compute the SAT loss after forwarding the SAT model for", "wrong_vimgs.append(Variable(w_imgs[i]).cuda()) else: real_vimgs.append(Variable(imgs[i])) wrong_vimgs.append(Variable(w_imgs[i])) return imgs, real_vimgs, wrong_vimgs, vembedding def", "* from tensorboard import summary from tensorboard import FileWriter from", "predictions = np.concatenate(predictions, 0) mean, std = compute_inception_score(predictions, 10) #", "save_real(self.imgs_tcpu, self.image_dir) ####################################################### # (1) Generate fake images ###################################################### noise.data.normal_(0,", "netsD = [] if cfg.TREE.BRANCH_NUM > 0: netsD.append(D_NET64()) if cfg.TREE.BRANCH_NUM", "return errD def train_Gnet(self, count): self.netG.zero_grad() errG_total = 0 flag", "batch_size = self.real_imgs[0].size(0) criterion, mu = self.criterion, self.mu netD, optD", "np.std(scores) def load_network(gpus): netG = G_NET() netG.apply(weights_init) netG = torch.nn.DataParallel(netG,", "!= -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) elif classname.find('Linear') != -1: nn.init.orthogonal(m.weight.data,", "errD_fake_uncond = cfg.TRAIN.COEFF.UNCOND_LOSS * \\ criterion(fake_logits[1], fake_labels) # errD_real =", "netG.apply(weights_init) netG = torch.nn.DataParallel(netG, device_ids=gpus) print(netG) netsD = [] if", "s_gpus = cfg.GPU_ID.split(',') self.gpus = [int(ix) for ix in s_gpus]", "# self.fake_imgs[1].shape = [batch_size, 3, 128, 128] # self.fake_imgs[2].shape =", "= images[i].add(1).div(2).mul(255).clamp(0, 255).byte() ndarr = img.permute(1, 2, 0).data.cpu().numpy() im =", "flag == 0: summary_D = summary.scalar('D_loss%d' % idx, errD.item()) self.summary_writer.add_summary(summary_D,", "end_t = time.time() print('''[%d/%d][%d] Loss_D: %.2f Loss_G: %.2f Loss_KL: %.2f", "+ like_mu2 + like_cov2 if flag == 0: sum_mu =", "mean_nlpp) 
self.summary_writer.add_summary(m_nlpp, count) # predictions = [] end_t = time.time()", "t_embeddings = Variable(t_embeddings).cuda() else: t_embeddings = Variable(t_embeddings) # print(t_embeddings[:, 0,", "== 0: print ('Checking real samples at first...') save_real(self.imgs_tcpu, self.image_dir)", "mu2, covariance2 = \\ compute_mean_covariance(self.fake_imgs[-3].detach()) like_mu1 = cfg.TRAIN.COEFF.COLOR_LOSS * nn.MSELoss()(mu1,", "data in enumerate(self.data_loader, 0): imgs, t_embeddings, filenames = data if", "return np.mean(scores), np.std(scores) def negative_log_posterior_probability(predictions, num_splits=1): # print('predictions', predictions.shape) scores", "i)) print('Save G/Ds models.') def save_real(imgs_tcpu, image_dir): num = cfg.TRAIN.VIS_COUNT", "training data ###################################################### self.imgs_tcpu, self.real_imgs, self.wrong_imgs, \\ self.txt_embedding = self.prepare_data(data)", "if flag == 0: sum_mu = summary.scalar('G_like_mu1', like_mu1.item()) self.summary_writer.add_summary(sum_mu, count)", "like_cov2 = cfg.TRAIN.COEFF.COLOR_LOSS * 5 * \\ nn.MSELoss()(covariance1, covariance2) errG_total", "Variable import torch.optim as optim import torchvision.utils as vutils import", "\\ self.txt_embedding = self.prepare_data(data) # Testing line for real samples", "= errD_real + 0.5 * (errD_wrong + errD_fake) # backward", "len(netsD), inception_model, count def define_optimizers(netG, netsD): optimizersD = [] num_Ds", "width = img.size(3) num_pixels = height * width # batch_size", "0 for i in range(self.num_Ds): errD = self.train_Dnet(i, count) errD_total", "in range(self.num_Ds): errD = self.train_Dnet(i, count) errD_total += errD #######################################################", "# (2) Update D network ###################################################### errD_total = 0 for", "the path for morels is not found!') else: # Build", "not os.path.isdir(folder): print('Make a new folder: ', folder) mkdir_p(folder) fullpath", "= netD(fake_imgs.detach(), mu.detach()) # errD_real = criterion(real_logits[0], real_labels) errD_wrong =", "# print('mean:', mean, 'std', std) m_incep = summary.scalar('Inception_mean', mean) self.summary_writer.add_summary(m_incep,", "= torch.load('%s%d.pth' % (cfg.TRAIN.NET_D, i)) netsD[i].load_state_dict(state_dict) inception_model = INCEPTION_V3() if", "* cfg.TRAIN.COEFF.KL errG_total = errG_total + kl_loss # Postpone the", "if cfg.TRAIN.FLAG: self.model_dir = os.path.join(output_dir, 'Model') self.image_dir = os.path.join(output_dir, 'Image')", "SNAPSHOT_INTERVAL: ###################################################### if count % cfg.TRAIN.SNAPSHOT_INTERVAL == 0: save_model(self.netG, avg_param_G,", "for step, data in enumerate(self.data_loader, 0): imgs, t_embeddings, filenames =", "1)) scores.append(np.exp(kl)) return np.mean(scores), np.std(scores) def negative_log_posterior_probability(predictions, num_splits=1): # print('predictions',", "print('# of netsD', len(netsD)) count = 0 if cfg.TRAIN.NET_G !=", "CaptionDatasets import * from tensorboard import summary from tensorboard import", "NUM_BRANCHES # self.fake_imgs[0].shape = [batch_size, 3, 64, 64] # self.fake_imgs[1].shape", "num_pixels = height * width # batch_size * channel_num *", "noise = Variable(torch.FloatTensor(self.batch_size, nz)) if cfg.CUDA: netG.cuda() noise = noise.cuda()", "0)) fake_img_set = (fake_img_set + 1) * 255 / 2", "################### def compute_mean_covariance(img): batch_size = img.size(0) channel_num = img.size(1) height", "+ 
errD_fake_uncond # errD = errD_real + errD_wrong + errD_fake", "fake images ###################################################### noise.data.normal_(0, 1) self.fake_imgs, self.mu, self.logvar = \\", "real_img (i.e., self.imgs_tcpu[i][0:num]) # is changed to [0, 1] by", "= vutils.make_grid(real_img).numpy() real_img_set = np.transpose(real_img_set, (1, 2, 0)) real_img_set =", "save_dir, split_dir, i, 256) # self.save_singleimages(fake_imgs[-2], filenames, # save_dir, split_dir,", "step, data in enumerate(self.data_loader, 0): for step, data in enumerate(zip(self.data_loader,", "1.0) if m.bias is not None: m.bias.data.fill_(0.0) def load_params(model, new_param):", "s_tmp = s_tmp[:s_tmp.rfind('/')] save_dir = '%s/iteration%d' % (s_tmp, iteration) nz", "count) sum_cov = summary.scalar('G_like_cov1', like_cov1.item()) self.summary_writer.add_summary(sum_cov, count) kl_loss = KL_loss(mu,", "if cfg.TRAIN.NET_D != '': for i in range(len(netsD)): print('Load %s_%d.pth'", "+ 1 iend = cfg.TRAIN.NET_G.rfind('.') count = cfg.TRAIN.NET_G[istart:iend] count =", "device_ids=self.gpus) print(netG) # state_dict = torch.load(cfg.TRAIN.NET_G) state_dict = \\ torch.load(cfg.TRAIN.NET_G,", "m.bias is not None: m.bias.data.fill_(0.0) def load_params(model, new_param): for p,", "imsize): for i in range(images.size(0)): s_tmp = '%s/single_samples/%s/%s' %\\ (save_dir,", "= self.real_imgs[idx] wrong_imgs = self.wrong_imgs[idx] fake_imgs = self.fake_imgs[idx] # netD.zero_grad()", "batch_size * channel_num * 1 * 1 mu = img.mean(2,", "cfg.TREE.BRANCH_NUM > 0: netsD.append(D_NET64()) if cfg.TREE.BRANCH_NUM > 1: netsD.append(D_NET128()) if", "path to save generated images s_tmp = cfg.TRAIN.NET_G istart =", "# Show, Attend, and Tell Dataloader train_loader = torch.utils.data.DataLoader( CaptionDataset(data_folder,", "KLD = torch.mean(KLD_element).mul_(-0.5) return KLD def weights_init(m): classname = m.__class__.__name__", "encoder/decoder structure for SAT model decoder = DecoderWithAttention(attention_dim=512, embed_dim=512, decoder_dim=512,", "= cfg.TRAIN.COEFF.UNCOND_LOSS *\\ criterion(outputs[1], real_labels) errG = errG + errG_patch", "channel_num img_hat_transpose = img_hat.transpose(1, 2) # batch_size * channel_num *", "KL_loss(mu, logvar): # -0.5 * sum(1 + log(sigma^2) - mu^2", "range(num_Ds): opt = optim.Adam(netsD[i].parameters(), lr=cfg.TRAIN.DISCRIMINATOR_LR, betas=(0.5, 0.999)) optimizersD.append(opt) # G_opt_paras", "%\\ (save_dir, split_dir, filenames[i]) folder = s_tmp[:s_tmp.rfind('/')] if not os.path.isdir(folder):", "np.log(np.max(part, 1)) result = np.mean(result) scores.append(result) return np.mean(scores), np.std(scores) def", "self.fake_imgs[idx] # netD.zero_grad() # Forward real_labels = self.real_labels[:batch_size] fake_labels =", "saved by create_input_files.py data_name = 'CUB_5_cap_per_img_5_min_word_freq' # base name shared", "as j: word_map = json.load(j) # Define the encoder/decoder structure", "encoder/decoder decoder_optimizer.zero_grad() if encoder_optimizer is not None: encoder_optimizer.zero_grad() ####################################################### #", "'': print('Error: the path for morels is not found!') else:", "3, imsize, imsize) # print(img.size()) super_img.append(img) # break super_img =", "images ###################################################### noise.data.normal_(0, 1) self.fake_imgs, self.mu, self.logvar = \\ self.netG(noise,", "> 0: errD_real_uncond = cfg.TRAIN.COEFF.UNCOND_LOSS * \\ criterion(real_logits[1], real_labels) errD_wrong_uncond", 
"optimizersD.append(opt) # G_opt_paras = [] # for p in netG.parameters():", "real_vimgs, wrong_vimgs, vembedding def train_Dnet(self, idx, count): flag = count", "= cfg.TRAIN.NET_G.rfind('_') + 1 iend = cfg.TRAIN.NET_G.rfind('.') count = cfg.TRAIN.NET_G[istart:iend]", "= cfg.TRAIN.NET_G.rfind('.') count = cfg.TRAIN.NET_G[istart:iend] count = int(count) + 1", "self.gradient_half = self.gradient_half.cuda() noise, fixed_noise = noise.cuda(), fixed_noise.cuda() predictions =", "not found!') else: # Build and load the generator if", "define_optimizers(netG, netsD): optimizersD = [] num_Ds = len(netsD) for i", "= summary.scalar('Inception_mean', mean) self.summary_writer.add_summary(m_incep, count) # mean_nlpp, std_nlpp = negative_log_posterior_probability(predictions,", "__init__(self, output_dir, data_loader, imsize): if cfg.TRAIN.FLAG: self.model_dir = os.path.join(output_dir, 'Model')", "Data parameters data_folder = 'birds_output' # folder with data files", "data files saved by create_input_files.py data_name = 'CUB_5_cap_per_img_5_min_word_freq' # base", "len(images_list) for i in range(batch_size): s_tmp = '%s/super/%s/%s' %\\ (save_dir,", "filenames = data if cfg.CUDA: t_embeddings = Variable(t_embeddings).cuda() else: t_embeddings", "100 == 0: summary_D = summary.scalar('D_loss', errD_total.item()) summary_G = summary.scalar('G_loss',", "2 fake_img_set = fake_img_set.astype(np.uint8) sup_fake_img = summary.image('fake_img%d' % i, fake_img_set)", "netG = torch.nn.DataParallel(netG, device_ids=gpus) print(netG) netsD = [] if cfg.TREE.BRANCH_NUM", "= self.criterion, self.mu, self.logvar real_labels = self.real_labels[:batch_size] for i in", "base name shared by data files normalize = transforms.Normalize(mean=[0.485, 0.456,", "sum_cov = summary.scalar('G_like_cov1', like_cov1.item()) self.summary_writer.add_summary(sum_cov, count) kl_loss = KL_loss(mu, logvar)", "kl = part * \\ (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))", "by data files normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224,", "500: predictions = np.concatenate(predictions, 0) mean, std = compute_inception_score(predictions, 10)", "optimizerG, optimizersD def save_model(netG, avg_param_G, netsD, epoch, model_dir): load_params(netG, avg_param_G)", "64) # break if cfg.TEST.B_EXAMPLE: # self.save_superimages(fake_img_list, filenames, # save_dir,", "* channel_num img_hat_transpose = img_hat.transpose(1, 2) # batch_size * channel_num", "netG = G_NET() netG.apply(weights_init) netG = torch.nn.DataParallel(netG, device_ids=gpus) print(netG) netsD", "cfg.TRAIN.COEFF.UNCOND_LOSS * \\ criterion(fake_logits[1], fake_labels) # errD_real = errD_real +", "count % cfg.TRAIN.SNAPSHOT_INTERVAL == 0: save_model(self.netG, avg_param_G, self.netsD, count, self.model_dir)", "% (s_tmp, iteration) nz = cfg.GAN.Z_DIM noise = Variable(torch.FloatTensor(self.batch_size, nz))", "(i.e., self.imgs_tcpu[i][0:num]) # is changed to [0, 1] by function", "Time: %.2fs ''' # D(real): %.4f D(wrong):%.4f D(fake) %.4f %", "sentenceID) # range from [-1, 1] to [0, 255] img", "-1. 
* np.log(np.max(part, 1)) result = np.mean(result) scores.append(result) return np.mean(scores),", "1] to [0, 255] img = images[i].add(1).div(2).mul(255).clamp(0, 255).byte() ndarr =", "in range(num_sentences): img = images_list[j][i] # print(img.size()) img = img.view(1,", "for i in range(self.num_Ds): if cfg.CUDA: real_vimgs.append(Variable(imgs[i]).cuda()) wrong_vimgs.append(Variable(w_imgs[i]).cuda()) else: real_vimgs.append(Variable(imgs[i]))", "fake_img_set.astype(np.uint8) sup_fake_img = summary.image('fake_img%d' % i, fake_img_set) summary_writer.add_summary(sup_fake_img, count) summary_writer.flush()", "predictions[istart:iend, :] result = -1. * np.log(np.max(part, 1)) result =", "sort_ind = decoder(img, caps, caplens) targets = caps_sorted[:, 1:] scores,", "compute_mean_covariance(img): batch_size = img.size(0) channel_num = img.size(1) height = img.size(2)", "2: netsD.append(D_NET256()) if cfg.TREE.BRANCH_NUM > 3: netsD.append(D_NET512()) if cfg.TREE.BRANCH_NUM >", "the path to save generated images s_tmp = cfg.TRAIN.NET_G istart", "for p in netG.parameters(): # if p.requires_grad: # G_opt_paras.append(p) optimizerG", "= cfg.GAN.Z_DIM noise = Variable(torch.FloatTensor(self.batch_size, nz)) if cfg.CUDA: netG.cuda() noise", "0): for step, data in enumerate(zip(self.data_loader, train_loader), 0): data_1 =", "nz)) fixed_noise = Variable(torch.FloatTensor(self.batch_size, nz).normal_(0, 1)) # Data parameters data_folder", "return kl_loss, errG_total def train(self): self.netG, self.netsD, self.num_Ds,\\ self.inception_model, start_count", "count def define_optimizers(netG, netsD): optimizersD = [] num_Ds = len(netsD)", "% 100 batch_size = self.real_imgs[0].size(0) criterion, mu, logvar = self.criterion,", "'CUB_5_cap_per_img_5_min_word_freq' # base name shared by data files normalize =", "> 1: mu1, covariance1 = compute_mean_covariance(self.fake_imgs[-1]) mu2, covariance2 = \\", "self.gradient_one.cuda() self.gradient_half = self.gradient_half.cuda() noise, fixed_noise = noise.cuda(), fixed_noise.cuda() predictions", "encoder_optimizer is not None: encoder_optimizer.zero_grad() ####################################################### # (2) Update D", "load_network(self.gpus) avg_param_G = copy_G_params(self.netG) self.optimizerG, self.optimizersD = \\ define_optimizers(self.netG, self.netsD)", "1 if cfg.TRAIN.NET_D != '': for i in range(len(netsD)): print('Load", "None: m.bias.data.fill_(0.0) def load_params(model, new_param): for p, new_p in zip(model.parameters(),", "optD = self.netsD[idx], self.optimizersD[idx] real_imgs = self.real_imgs[idx] wrong_imgs = self.wrong_imgs[idx]", "s_tmp = '%s/super/%s/%s' %\\ (save_dir, split_dir, filenames[i]) folder = s_tmp[:s_tmp.rfind('/')]", "-1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) elif classname.find('Linear') != -1: nn.init.orthogonal(m.weight.data, 1.0)", "sigma^2) KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar) KLD = torch.mean(KLD_element).mul_(-0.5) return KLD def", "is still [-1. 1]... 
vutils.save_image( fake_img.data, '%s/count_%09d_fake_samples_%d.png' % (image_dir, count,", "real_img_set.astype(np.uint8) sup_real_img = summary.image('real_img', real_img_set) def save_img_results(imgs_tcpu, fake_imgs, num_imgs, count,", "cfg.CUDA: self.criterion.cuda() self.SATcriterion.cuda() # Compute SATloss self.real_labels = self.real_labels.cuda() self.fake_labels", "2, 0)) real_img_set = real_img_set * 255 real_img_set = real_img_set.astype(np.uint8)", "real_labels) errD_wrong_uncond = cfg.TRAIN.COEFF.UNCOND_LOSS * \\ criterion(wrong_logits[1], real_labels) errD_fake_uncond =", "errG_total def train(self): self.netG, self.netsD, self.num_Ds,\\ self.inception_model, start_count = load_network(self.gpus)", "i * predictions.shape[0] // num_splits iend = (i + 1)", "self.save_superimages(fake_img_list, filenames, # save_dir, split_dir, 128) self.save_superimages(fake_img_list, filenames, save_dir, split_dir,", "real_labels) errG = errG + errG_patch errG_total = errG_total +", "netG(noise, t_embeddings[:, i, :]) if cfg.TEST.B_EXAMPLE: # fake_img_list.append(fake_imgs[0].data.cpu()) # fake_img_list.append(fake_imgs[1].data.cpu())", "cfg.TEST.B_EXAMPLE: # self.save_superimages(fake_img_list, filenames, # save_dir, split_dir, 64) # self.save_superimages(fake_img_list,", "= INCEPTION_V3() if cfg.CUDA: netG.cuda() for i in range(len(netsD)): netsD[i].cuda()", "i in range(len(netsD)): netD = netsD[i] torch.save( netD.state_dict(), '%s/netD%d.pth' %", "pack_padded_sequence(targets, decode_lengths, batch_first=True).cuda() SATloss += self.SATcriterion(scores, targets) + 1 *", "netsD[i].cuda() inception_model = inception_model.cuda() inception_model.eval() return netG, netsD, len(netsD), inception_model,", "netsD.append(D_NET1024()) # TODO: if cfg.TREE.BRANCH_NUM > 5: for i in", "img = images[i].add(1).div(2).mul(255).clamp(0, 255).byte() ndarr = img.permute(1, 2, 0).data.cpu().numpy() im", "self.summary_writer) # load_params(self.netG, backup_para) # Compute inception score if len(predictions)", "real_labels) errD_fake_uncond = cfg.TRAIN.COEFF.UNCOND_LOSS * \\ criterion(fake_logits[1], fake_labels) # errD_real", "errG_total += SATloss errG_total.backward() self.optimizerG.step() ####################################################### # (*) Update SAT", "Compute inception score if len(predictions) > 500: predictions = np.concatenate(predictions,", "super_img.append(img) # break super_img = torch.cat(super_img, 0) vutils.save_image(super_img, savename, nrow=10,", "= data if cfg.CUDA: t_embeddings = Variable(t_embeddings).cuda() else: t_embeddings =", "'Model') self.image_dir = os.path.join(output_dir, 'Image') self.log_dir = os.path.join(output_dir, 'Log') mkdir_p(self.model_dir)", "netD.state_dict(), '%s/netD%d.pth' % (model_dir, i)) print('Save G/Ds models.') def save_real(imgs_tcpu,", "+ 1 if cfg.TRAIN.NET_D != '': for i in range(len(netsD)):", "# folder with data files saved by create_input_files.py data_name =", "real samples at first...') save_real(self.imgs_tcpu, self.image_dir) ####################################################### # (1) Generate", "by create_input_files.py data_name = 'CUB_5_cap_per_img_5_min_word_freq' # base name shared by", "device_ids=gpus) print(netG) netsD = [] if cfg.TREE.BRANCH_NUM > 0: netsD.append(D_NET64())", "range(self.num_Ds): errD = self.train_Dnet(i, count) errD_total += errD ####################################################### #", "self.wrong_imgs[idx] fake_imgs = self.fake_imgs[idx] # netD.zero_grad() # Forward real_labels =", "/ num_pixels return 
# Conditional GAN trainer: a StackGAN-style multi-scale generator/discriminator
# pyramid trained jointly with a Show, Attend and Tell (SAT) captioning loss.
from __future__ import print_function
from six.moves import range

import torchvision.transforms as transforms
import torch.backends.cudnn as cudnn
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import torchvision.utils as vutils
import numpy as np
import os
import json
import time

from PIL import Image, ImageFont, ImageDraw
from copy import deepcopy

from miscc.config import cfg
from miscc.utils import mkdir_p
from CaptionDatasets import *

from tensorboard import summary
from tensorboard import FileWriter

from model import G_NET, D_NET64, D_NET128, D_NET256, D_NET512, D_NET1024, INCEPTION_V3


# ################## Shared functions ###################
def compute_mean_covariance(img):
    batch_size = img.size(0)
    channel_num = img.size(1)
    height = img.size(2)
    width = img.size(3)
    num_pixels = height * width

    # batch_size * channel_num * 1 * 1
    mu = img.mean(2, keepdim=True).mean(3, keepdim=True)

    # batch_size * channel_num * num_pixels
    img_hat = img - mu.expand_as(img)
    img_hat = img_hat.view(batch_size, channel_num, num_pixels)
    # batch_size * num_pixels * channel_num
    img_hat_transpose = img_hat.transpose(1, 2)
    # batch_size * channel_num * channel_num
    covariance = torch.bmm(img_hat, img_hat_transpose)
    covariance = covariance / num_pixels
    return mu, covariance


def KL_loss(mu, logvar):
    # -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
    KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar)
    KLD = torch.mean(KLD_element).mul_(-0.5)
    return KLD


def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        nn.init.orthogonal(m.weight.data, 1.0)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
    elif classname.find('Linear') != -1:
        nn.init.orthogonal(m.weight.data, 1.0)
        if m.bias is not None:
            m.bias.data.fill_(0.0)


def load_params(model, new_param):
    for p, new_p in zip(model.parameters(), new_param):
        p.data.copy_(new_p)


def copy_G_params(model):
    flatten = deepcopy(list(p.data for p in model.parameters()))
    return flatten


def compute_inception_score(predictions, num_splits=1):
    # print('predictions', predictions.shape)
    scores = []
    for i in range(num_splits):
        istart = i * predictions.shape[0] // num_splits
        iend = (i + 1) * predictions.shape[0] // num_splits
        part = predictions[istart:iend, :]
        kl = part * \
            (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
        kl = np.mean(np.sum(kl, 1))
        scores.append(np.exp(kl))
    return np.mean(scores), np.std(scores)


def negative_log_posterior_probability(predictions, num_splits=1):
    # print('predictions', predictions.shape)
    scores = []
    for i in range(num_splits):
        istart = i * predictions.shape[0] // num_splits
        iend = (i + 1) * predictions.shape[0] // num_splits
        part = predictions[istart:iend, :]
        result = -1. * np.log(np.max(part, 1))
        result = np.mean(result)
        scores.append(result)
    return np.mean(scores), np.std(scores)


def load_network(gpus):
    netG = G_NET()
    netG.apply(weights_init)
    netG = torch.nn.DataParallel(netG, device_ids=gpus)
    print(netG)

    netsD = []
    if cfg.TREE.BRANCH_NUM > 0:
        netsD.append(D_NET64())
    if cfg.TREE.BRANCH_NUM > 1:
        netsD.append(D_NET128())
    if cfg.TREE.BRANCH_NUM > 2:
        netsD.append(D_NET256())
    if cfg.TREE.BRANCH_NUM > 3:
        netsD.append(D_NET512())
    if cfg.TREE.BRANCH_NUM > 4:
        netsD.append(D_NET1024())
    # TODO: if cfg.TREE.BRANCH_NUM > 5:

    for i in range(len(netsD)):
        netsD[i].apply(weights_init)
        netsD[i] = torch.nn.DataParallel(netsD[i], device_ids=gpus)
        # print(netsD[i])
    print('# of netsD', len(netsD))

    count = 0
    if cfg.TRAIN.NET_G != '':
        state_dict = torch.load(cfg.TRAIN.NET_G)
        netG.load_state_dict(state_dict)
        print('Load ', cfg.TRAIN.NET_G)

        istart = cfg.TRAIN.NET_G.rfind('_') + 1
        iend = cfg.TRAIN.NET_G.rfind('.')
        count = cfg.TRAIN.NET_G[istart:iend]
        count = int(count) + 1

    if cfg.TRAIN.NET_D != '':
        for i in range(len(netsD)):
            print('Load %s_%d.pth' % (cfg.TRAIN.NET_D, i))
            state_dict = torch.load('%s%d.pth' % (cfg.TRAIN.NET_D, i))
            netsD[i].load_state_dict(state_dict)

    inception_model = INCEPTION_V3()

    if cfg.CUDA:
        netG.cuda()
        for i in range(len(netsD)):
            netsD[i].cuda()
        inception_model = inception_model.cuda()
    inception_model.eval()

    return netG, netsD, len(netsD), inception_model, count


def define_optimizers(netG, netsD):
    optimizersD = []
    num_Ds = len(netsD)
    for i in range(num_Ds):
        opt = optim.Adam(netsD[i].parameters(),
                         lr=cfg.TRAIN.DISCRIMINATOR_LR,
                         betas=(0.5, 0.999))
        optimizersD.append(opt)

    optimizerG = optim.Adam(netG.parameters(),
                            lr=cfg.TRAIN.GENERATOR_LR,
                            betas=(0.5, 0.999))
    return optimizerG, optimizersD


def save_model(netG, avg_param_G, netsD, epoch, model_dir):
    load_params(netG, avg_param_G)
    torch.save(
        netG.state_dict(),
        '%s/netG_%d.pth' % (model_dir, epoch))
    for i in range(len(netsD)):
        netD = netsD[i]
        torch.save(
            netD.state_dict(),
            '%s/netD%d.pth' % (model_dir, i))
    print('Save G/Ds models.')


def save_real(imgs_tcpu, image_dir):
    num = cfg.TRAIN.VIS_COUNT
    # The range of real_img (i.e., self.imgs_tcpu[i][0:num])
    # is changed to [0, 1] by function vutils.save_image
    real_img = imgs_tcpu[-1][0:num]
    vutils.save_image(
        real_img, '%s/real_samples.png' % (image_dir), normalize=True)
    real_img_set = vutils.make_grid(real_img).numpy()
    real_img_set = np.transpose(real_img_set, (1, 2, 0))
    real_img_set = real_img_set * 255
    real_img_set = real_img_set.astype(np.uint8)
    sup_real_img = summary.image('real_img', real_img_set)


def save_img_results(imgs_tcpu, fake_imgs, num_imgs,
                     count, image_dir, summary_writer):
    num = cfg.TRAIN.VIS_COUNT

    # The range of real_img (i.e., self.imgs_tcpu[i][0:num])
    # is changed to [0, 1] by function vutils.save_image
    real_img = imgs_tcpu[-1][0:num]
    vutils.save_image(
        real_img, '%s/real_samples.png' % (image_dir), normalize=True)
    real_img_set = vutils.make_grid(real_img).numpy()
    real_img_set = np.transpose(real_img_set, (1, 2, 0))
    real_img_set = real_img_set * 255
    real_img_set = real_img_set.astype(np.uint8)
    sup_real_img = summary.image('real_img', real_img_set)
    summary_writer.add_summary(sup_real_img, count)

    for i in range(num_imgs):
        fake_img = fake_imgs[i][0:num]
        # The range of fake_img.data (i.e., self.fake_imgs[i][0:num])
        # is still [-1, 1]
        vutils.save_image(
            fake_img.data, '%s/count_%09d_fake_samples_%d.png' %
            (image_dir, count, i), normalize=True)

        fake_img_set = vutils.make_grid(fake_img.data).cpu().numpy()
        fake_img_set = np.transpose(fake_img_set, (1, 2, 0))
        fake_img_set = (fake_img_set + 1) * 255 / 2
        fake_img_set = fake_img_set.astype(np.uint8)

        sup_fake_img = summary.image('fake_img%d' % i, fake_img_set)
        summary_writer.add_summary(sup_fake_img, count)
        summary_writer.flush()


# ################# Text to image task ############################ #
class condGANTrainer(object):
    def __init__(self, output_dir, data_loader, imsize):
        if cfg.TRAIN.FLAG:
            self.model_dir = os.path.join(output_dir, 'Model')
            self.image_dir = os.path.join(output_dir, 'Image')
            self.log_dir = os.path.join(output_dir, 'Log')
            mkdir_p(self.model_dir)
            mkdir_p(self.image_dir)
            mkdir_p(self.log_dir)
            self.summary_writer = FileWriter(self.log_dir)

        s_gpus = cfg.GPU_ID.split(',')
        self.gpus = [int(ix) for ix in s_gpus]
        self.num_gpus = len(self.gpus)
        torch.cuda.set_device(self.gpus[0])
        cudnn.benchmark = True

        self.batch_size = cfg.TRAIN.BATCH_SIZE * self.num_gpus
        self.max_epoch = cfg.TRAIN.MAX_EPOCH
        self.snapshot_interval = cfg.TRAIN.SNAPSHOT_INTERVAL

        self.data_loader = data_loader
        self.num_batches = len(self.data_loader)

    def prepare_data(self, data):
        imgs, w_imgs, t_embedding, _ = data

        real_vimgs, wrong_vimgs = [], []
        if cfg.CUDA:
            vembedding = Variable(t_embedding).cuda()
        else:
            vembedding = Variable(t_embedding)
        for i in range(self.num_Ds):
            if cfg.CUDA:
                real_vimgs.append(Variable(imgs[i]).cuda())
                wrong_vimgs.append(Variable(w_imgs[i]).cuda())
            else:
                real_vimgs.append(Variable(imgs[i]))
                wrong_vimgs.append(Variable(w_imgs[i]))
        return imgs, real_vimgs, wrong_vimgs, vembedding

    def train_Dnet(self, idx, count):
        flag = count % 100
        batch_size = self.real_imgs[0].size(0)
        criterion, mu = self.criterion, self.mu

        netD, optD = self.netsD[idx], self.optimizersD[idx]
        real_imgs = self.real_imgs[idx]
        wrong_imgs = self.wrong_imgs[idx]
        fake_imgs = self.fake_imgs[idx]
        # netD.zero_grad()
        # Forward
        real_labels = self.real_labels[:batch_size]
        fake_labels = self.fake_labels[:batch_size]
        # for real
        real_logits = netD(real_imgs, mu.detach())
        wrong_logits = netD(wrong_imgs, mu.detach())
        fake_logits = netD(fake_imgs.detach(), mu.detach())

        errD_real = criterion(real_logits[0], real_labels)
        errD_wrong = criterion(wrong_logits[0], fake_labels)
        errD_fake = criterion(fake_logits[0], fake_labels)
        if len(real_logits) > 1 and cfg.TRAIN.COEFF.UNCOND_LOSS > 0:
            errD_real_uncond = cfg.TRAIN.COEFF.UNCOND_LOSS * \
                criterion(real_logits[1], real_labels)
            errD_wrong_uncond = cfg.TRAIN.COEFF.UNCOND_LOSS * \
                criterion(wrong_logits[1], real_labels)
            errD_fake_uncond = cfg.TRAIN.COEFF.UNCOND_LOSS * \
                criterion(fake_logits[1], fake_labels)
            errD_real = errD_real + errD_real_uncond
            errD_wrong = errD_wrong + errD_wrong_uncond
            errD_fake = errD_fake + errD_fake_uncond
            errD = errD_real + errD_wrong + errD_fake
        else:
            errD = errD_real + 0.5 * (errD_wrong + errD_fake)
        # backward
        errD.backward()
        # update parameters
        optD.step()
        # log
        if flag == 0:
            summary_D = summary.scalar('D_loss%d' % idx, errD.item())
            self.summary_writer.add_summary(summary_D, count)
        return errD

    def train_Gnet(self, count):
        self.netG.zero_grad()
        errG_total = 0
        flag = count % 100
        batch_size = self.real_imgs[0].size(0)
        criterion, mu, logvar = self.criterion, self.mu, self.logvar
        real_labels = self.real_labels[:batch_size]
        for i in range(self.num_Ds):
            outputs = self.netsD[i](self.fake_imgs[i], mu)
            errG = criterion(outputs[0], real_labels)
            if len(outputs) > 1 and cfg.TRAIN.COEFF.UNCOND_LOSS > 0:
                errG_patch = cfg.TRAIN.COEFF.UNCOND_LOSS *\
                    criterion(outputs[1], real_labels)
                errG = errG + errG_patch
            errG_total = errG_total + errG
            if flag == 0:
                summary_D = summary.scalar('G_loss%d' % i, errG.item())
                self.summary_writer.add_summary(summary_D, count)

        # Compute color consistency losses
        if cfg.TRAIN.COEFF.COLOR_LOSS > 0:
            if self.num_Ds > 1:
                mu1, covariance1 = compute_mean_covariance(self.fake_imgs[-1])
                mu2, covariance2 = \
                    compute_mean_covariance(self.fake_imgs[-2].detach())
                like_mu2 = cfg.TRAIN.COEFF.COLOR_LOSS * nn.MSELoss()(mu1, mu2)
                like_cov2 = cfg.TRAIN.COEFF.COLOR_LOSS * 5 * \
                    nn.MSELoss()(covariance1, covariance2)
                errG_total = errG_total + like_mu2 + like_cov2
                if flag == 0:
                    sum_mu = summary.scalar('G_like_mu2', like_mu2.item())
                    self.summary_writer.add_summary(sum_mu, count)
                    sum_cov = summary.scalar('G_like_cov2', like_cov2.item())
                    self.summary_writer.add_summary(sum_cov, count)
            if self.num_Ds > 2:
                mu1, covariance1 = compute_mean_covariance(self.fake_imgs[-2])
                mu2, covariance2 = \
                    compute_mean_covariance(self.fake_imgs[-3].detach())
                like_mu1 = cfg.TRAIN.COEFF.COLOR_LOSS * nn.MSELoss()(mu1, mu2)
                like_cov1 = cfg.TRAIN.COEFF.COLOR_LOSS * 5 * \
                    nn.MSELoss()(covariance1, covariance2)
                errG_total = errG_total + like_mu1 + like_cov1
                if flag == 0:
                    sum_mu = summary.scalar('G_like_mu1', like_mu1.item())
                    self.summary_writer.add_summary(sum_mu, count)
                    sum_cov = summary.scalar('G_like_cov1', like_cov1.item())
                    self.summary_writer.add_summary(sum_cov, count)

        kl_loss = KL_loss(mu, logvar) * cfg.TRAIN.COEFF.KL
        errG_total = errG_total + kl_loss
        # Postpone the backward propagation
        # errG_total.backward()
        # self.optimizerG.step()
        return kl_loss, errG_total

    def train(self):
        self.netG, self.netsD, self.num_Ds,\
            self.inception_model, start_count = load_network(self.gpus)
        avg_param_G = copy_G_params(self.netG)

        self.optimizerG, self.optimizersD = \
            define_optimizers(self.netG, self.netsD)

        self.criterion = nn.BCELoss()
        self.SATcriterion = nn.CrossEntropyLoss()

        self.real_labels = Variable(torch.FloatTensor(self.batch_size).fill_(1))
        self.fake_labels = Variable(torch.FloatTensor(self.batch_size).fill_(0))

        self.gradient_one = torch.FloatTensor([1.0])
        self.gradient_half = torch.FloatTensor([0.5])

        nz = cfg.GAN.Z_DIM
        noise = Variable(torch.FloatTensor(self.batch_size, nz))
        fixed_noise = \
            Variable(torch.FloatTensor(self.batch_size, nz).normal_(0, 1))

        # Data parameters
        data_folder = 'birds_output'  # folder with data files saved by create_input_files.py
        data_name = 'CUB_5_cap_per_img_5_min_word_freq'  # base name shared by data files
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])

        # Show, Attend, and Tell Dataloader
        train_loader = torch.utils.data.DataLoader(
            CaptionDataset(data_folder, data_name, 'TRAIN',
                           transform=transforms.Compose([normalize])),
            batch_size=self.batch_size, shuffle=True,
            num_workers=int(cfg.WORKERS), pin_memory=True)

        if cfg.CUDA:
            self.criterion.cuda()
            self.SATcriterion.cuda()  # Compute SATloss
            self.real_labels = self.real_labels.cuda()
            self.fake_labels = self.fake_labels.cuda()
            self.gradient_one = self.gradient_one.cuda()
            self.gradient_half = self.gradient_half.cuda()
            noise, fixed_noise = noise.cuda(), fixed_noise.cuda()

        predictions = []
        count = start_count
        start_epoch = start_count // (self.num_batches)
        for epoch in range(start_epoch, self.max_epoch):
            start_t = time.time()

            # for step, data in enumerate(self.data_loader, 0):
            for step, data in enumerate(zip(self.data_loader, train_loader), 0):
                data_1 = data[0]
                _, caps, caplens = data[1]
                data = data_1

                #######################################################
                # (0) Prepare training data
                ######################################################
                self.imgs_tcpu, self.real_imgs, self.wrong_imgs, \
                    self.txt_embedding = self.prepare_data(data)

                # Testing line for real samples
                if epoch == start_epoch and step == 0:
                    print('Checking real samples')
                    save_real(self.imgs_tcpu, self.image_dir)

                #######################################################
                # (1) Generate fake images
                ######################################################
                noise.data.normal_(0, 1)
                self.fake_imgs, self.mu, self.logvar = \
                    self.netG(noise, self.txt_embedding)
                # len(self.fake_imgs) = NUM_BRANCHES
                # self.fake_imgs[0].shape = [batch_size, 3, 64, 64]
                # self.fake_imgs[1].shape = [batch_size, 3, 128, 128]
                # self.fake_imgs[2].shape = [batch_size, 3, 256, 256]

                #######################################################
                # (*) Forward fake images to SAT
                ######################################################
                from SATmodels import Encoder, DecoderWithAttention
                from torch.nn.utils.rnn import pack_padded_sequence

                fine_tune_encoder = False

                # Read word map
                word_map_file = os.path.join(data_folder, 'WORDMAP_' + data_name + '.json')
                with open(word_map_file, 'r') as j:
                    word_map = json.load(j)

                # Define the encoder/decoder structure for SAT model
                decoder = DecoderWithAttention(attention_dim=512,
                                               embed_dim=512,
                                               decoder_dim=512,
                                               vocab_size=len(word_map),
                                               dropout=0.5).cuda()
                decoder_optimizer = torch.optim.Adam(
                    params=filter(lambda p: p.requires_grad, decoder.parameters()),
                    lr=4e-4)
                encoder = Encoder().cuda()
                encoder.fine_tune(fine_tune_encoder)
                encoder_optimizer = torch.optim.Adam(
                    params=filter(lambda p: p.requires_grad, encoder.parameters()),
                    lr=1e-4) if fine_tune_encoder else None

                SATloss = 0
                # Compute the SAT loss after forwarding the SAT model
                for idx in range(len(self.fake_imgs)):
                    img = encoder(self.fake_imgs[idx])
                    scores, caps_sorted, decode_lengths, alphas, sort_ind = \
                        decoder(img, caps, caplens)
                    targets = caps_sorted[:, 1:]
                    scores, _ = pack_padded_sequence(scores, decode_lengths, batch_first=True).cuda()
                    targets, _ = pack_padded_sequence(targets, decode_lengths, batch_first=True).cuda()
                    SATloss += self.SATcriterion(scores, targets) + \
                        1 * ((1. - alphas.sum(dim=1)) ** 2).mean()

                # Set zero_grad for encoder/decoder
                decoder_optimizer.zero_grad()
                if encoder_optimizer is not None:
                    encoder_optimizer.zero_grad()

                #######################################################
                # (2) Update D network
                ######################################################
                errD_total = 0
                for i in range(self.num_Ds):
                    errD = self.train_Dnet(i, count)
                    errD_total += errD

                #######################################################
                # (3) Update G network: maximize log(D(G(z)))
                ######################################################
                kl_loss, errG_total = self.train_Gnet(count)
                for p, avg_p in zip(self.netG.parameters(), avg_param_G):
                    avg_p.mul_(0.999).add_(0.001, p.data)

                # Combine with G and SAT first, then back propagation
                errG_total += SATloss
                errG_total.backward()
                self.optimizerG.step()

                #######################################################
                # (*) Update SAT network:
                ######################################################
                # Update weights
                decoder_optimizer.step()
                if encoder_optimizer is not None:
                    encoder_optimizer.step()

                #######################################################
                # (*) Prediction and Inception score:
                ######################################################
                pred = self.inception_model(self.fake_imgs[-1].detach())
                predictions.append(pred.data.cpu().numpy())

                if count % 100 == 0:
                    summary_D = summary.scalar('D_loss', errD_total.item())
                    summary_G = summary.scalar('G_loss', errG_total.item())
                    summary_KL = summary.scalar('KL_loss', kl_loss.item())
                    self.summary_writer.add_summary(summary_D, count)
                    self.summary_writer.add_summary(summary_G, count)
                    self.summary_writer.add_summary(summary_KL, count)

                count += 1

                #######################################################
                # (*) Save Images/Log/Model per SNAPSHOT_INTERVAL:
                ######################################################
                if count % cfg.TRAIN.SNAPSHOT_INTERVAL == 0:
                    save_model(self.netG, avg_param_G, self.netsD, count, self.model_dir)
                    # Save images
                    backup_para = copy_G_params(self.netG)
                    load_params(self.netG, avg_param_G)
                    # self.fake_imgs, _, _ = self.netG(fixed_noise, self.txt_embedding)
                    save_img_results(self.imgs_tcpu, self.fake_imgs, self.num_Ds,
                                     count, self.image_dir, self.summary_writer)
                    # load_params(self.netG, backup_para)

                    # Compute inception score
                    if len(predictions) > 500:
                        predictions = np.concatenate(predictions, 0)
                        mean, std = compute_inception_score(predictions, 10)
                        # print('mean:', mean, 'std', std)
                        m_incep = summary.scalar('Inception_mean', mean)
                        self.summary_writer.add_summary(m_incep, count)

                        mean_nlpp, std_nlpp = \
                            negative_log_posterior_probability(predictions, 10)
                        m_nlpp = summary.scalar('NLPP_mean', mean_nlpp)
                        self.summary_writer.add_summary(m_nlpp, count)

                        predictions = []

            end_t = time.time()
            print('''[%d/%d][%d] Loss_D: %.2f Loss_G: %.2f Loss_KL: %.2f Time: %.2fs
                  '''  # D(real): %.4f D(wrong):%.4f D(fake) %.4f
                  % (epoch, self.max_epoch, self.num_batches,
                     errD_total.item(), errG_total.item(),
                     kl_loss.item(), end_t - start_t))

        save_model(self.netG, avg_param_G, self.netsD, count, self.model_dir)
        self.summary_writer.close()

    def save_superimages(self, images_list, filenames,
                         save_dir, split_dir, imsize):
        batch_size = images_list[0].size(0)
        num_sentences = len(images_list)
        for i in range(batch_size):
            s_tmp = '%s/super/%s/%s' %\
                (save_dir, split_dir, filenames[i])
            folder = s_tmp[:s_tmp.rfind('/')]
            if not os.path.isdir(folder):
                print('Make a new folder: ', folder)
                mkdir_p(folder)

            savename = '%s_%d.png' % (s_tmp, imsize)
            super_img = []
            for j in range(num_sentences):
                img = images_list[j][i]
                # print(img.size())
                img = img.view(1, 3, imsize, imsize)
                # print(img.size())
                super_img.append(img)
                # break
            super_img = torch.cat(super_img, 0)
            vutils.save_image(super_img, savename, nrow=10, normalize=True)

    def save_singleimages(self, images, filenames,
                          save_dir, split_dir, sentenceID, imsize):
        for i in range(images.size(0)):
            s_tmp = '%s/single_samples/%s/%s' %\
                (save_dir, split_dir, filenames[i])
            folder = s_tmp[:s_tmp.rfind('/')]
            if not os.path.isdir(folder):
                print('Make a new folder: ', folder)
                mkdir_p(folder)

            fullpath = '%s_%d_sentence%d.png' % (s_tmp, imsize, sentenceID)
            # range from [-1, 1] to [0, 255]
            img = images[i].add(1).div(2).mul(255).clamp(0, 255).byte()
            ndarr = img.permute(1, 2, 0).data.cpu().numpy()
            im = Image.fromarray(ndarr)
            im.save(fullpath)

    def evaluate(self, split_dir):
        if cfg.TRAIN.NET_G == '':
            print('Error: the path for models is not found!')
        else:
            # Build and load the generator
            if split_dir == 'test':
                split_dir = 'valid'
            netG = G_NET()
            netG.apply(weights_init)
            netG = torch.nn.DataParallel(netG, device_ids=self.gpus)
            print(netG)
            # state_dict = torch.load(cfg.TRAIN.NET_G)
            state_dict = \
                torch.load(cfg.TRAIN.NET_G,
                           map_location=lambda storage, loc: storage)
            netG.load_state_dict(state_dict)
            print('Load ', cfg.TRAIN.NET_G)

            # the path to save generated images
            s_tmp = cfg.TRAIN.NET_G
            istart = s_tmp.rfind('_') + 1
            iend = s_tmp.rfind('.')
            iteration = int(s_tmp[istart:iend])
            s_tmp = s_tmp[:s_tmp.rfind('/')]
            save_dir = '%s/iteration%d' % (s_tmp, iteration)

            nz = cfg.GAN.Z_DIM
            noise = Variable(torch.FloatTensor(self.batch_size, nz))
            if cfg.CUDA:
                netG.cuda()
                noise = noise.cuda()

            # switch to evaluate mode
            netG.eval()
            for step, data in enumerate(self.data_loader, 0):
                imgs, t_embeddings, filenames = data
                if cfg.CUDA:
                    t_embeddings = Variable(t_embeddings).cuda()
                else:
                    t_embeddings = Variable(t_embeddings)
                # print(t_embeddings[:, 0, :], t_embeddings.size(1))

                embedding_dim = t_embeddings.size(1)
                batch_size = imgs[0].size(0)
                noise.data.resize_(batch_size, nz)
                noise.data.normal_(0, 1)

                fake_img_list = []
                for i in range(embedding_dim):
                    fake_imgs, _, _ = netG(noise, t_embeddings[:, i, :])
                    if cfg.TEST.B_EXAMPLE:
                        # fake_img_list.append(fake_imgs[0].data.cpu())
                        # fake_img_list.append(fake_imgs[1].data.cpu())
                        fake_img_list.append(fake_imgs[2].data.cpu())
                    else:
                        self.save_singleimages(fake_imgs[-1], filenames,
                                               save_dir, split_dir, i, 256)
                        # self.save_singleimages(fake_imgs[-2], filenames,
                        #                        save_dir, split_dir, i, 128)
                        # self.save_singleimages(fake_imgs[-3], filenames,
                        #                        save_dir, split_dir, i, 64)
                    # break
                if cfg.TEST.B_EXAMPLE:
                    # self.save_superimages(fake_img_list, filenames,
                    #                       save_dir, split_dir, 64)
                    # self.save_superimages(fake_img_list, filenames,
                    #                       save_dir, split_dir, 128)
                    self.save_superimages(fake_img_list, filenames,
                                          save_dir, split_dir, 256)
s_tmp.rfind('.') iteration = int(s_tmp[istart:iend]) s_tmp =", "mu.detach()) # errD_real = criterion(real_logits[0], real_labels) errD_wrong = criterion(wrong_logits[0], fake_labels)", "Variable(torch.FloatTensor(self.batch_size).fill_(1)) self.fake_labels = Variable(torch.FloatTensor(self.batch_size).fill_(0)) self.gradient_one = torch.FloatTensor([1.0]) self.gradient_half = torch.FloatTensor([0.5])", "weights decoder_optimizer.step() if encoder_optimizer is not None: encoder_optimizer.step() ####################################################### #", "self.image_dir = os.path.join(output_dir, 'Image') self.log_dir = os.path.join(output_dir, 'Log') mkdir_p(self.model_dir) mkdir_p(self.image_dir)", "(s_tmp, iteration) nz = cfg.GAN.Z_DIM noise = Variable(torch.FloatTensor(self.batch_size, nz)) if", "step, data in enumerate(zip(self.data_loader, train_loader), 0): data_1 = data[0] _,", "self.gradient_one = self.gradient_one.cuda() self.gradient_half = self.gradient_half.cuda() noise, fixed_noise = noise.cuda(),", "errD_fake_uncond # errD = errD_real + errD_wrong + errD_fake else:", "classname.find('Conv') != -1: nn.init.orthogonal(m.weight.data, 1.0) elif classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0,", "num_Ds = len(netsD) for i in range(num_Ds): opt = optim.Adam(netsD[i].parameters(),", "2, 0).data.cpu().numpy() im = Image.fromarray(ndarr) im.save(fullpath) def evaluate(self, split_dir): if", "avg_param_G): avg_p.mul_(0.999).add_(0.001, p.data) # Combine with G and SAT first,", "state_dict = torch.load(cfg.TRAIN.NET_G) netG.load_state_dict(state_dict) print('Load ', cfg.TRAIN.NET_G) istart = cfg.TRAIN.NET_G.rfind('_')", "nn.MSELoss()(covariance1, covariance2) errG_total = errG_total + like_mu1 + like_cov1 if", "self.summary_writer.add_summary(m_nlpp, count) # predictions = [] end_t = time.time() print('''[%d/%d][%d]", "def load_network(gpus): netG = G_NET() netG.apply(weights_init) netG = torch.nn.DataParallel(netG, device_ids=gpus)", "# errD = errD_real + errD_wrong + errD_fake else: errD", "std = compute_inception_score(predictions, 10) # print('mean:', mean, 'std', std) m_incep", "folder: ', folder) mkdir_p(folder) # savename = '%s_%d.png' % (s_tmp,", "torch.nn.DataParallel(netsD[i], device_ids=gpus) # print(netsD[i]) print('# of netsD', len(netsD)) count =", "= time.time() print('''[%d/%d][%d] Loss_D: %.2f Loss_G: %.2f Loss_KL: %.2f Time:", "= nn.CrossEntropyLoss() self.real_labels = Variable(torch.FloatTensor(self.batch_size).fill_(1)) self.fake_labels = Variable(torch.FloatTensor(self.batch_size).fill_(0)) self.gradient_one =", "split_dir = 'valid' netG = G_NET() netG.apply(weights_init) netG = torch.nn.DataParallel(netG,", ":] kl = part * \\ (np.log(part) - np.log(np.expand_dims(np.mean(part, 0),", "else: # Build and load the generator if split_dir ==", "batch_first=True).cuda() SATloss += self.SATcriterion(scores, targets) + 1 * ((1. 
-", "errG_total = errG_total + errG if flag == 0: summary_D", "epoch in range(start_epoch, self.max_epoch): start_t = time.time() # for step,", "* \\ criterion(real_logits[1], real_labels) errD_wrong_uncond = cfg.TRAIN.COEFF.UNCOND_LOSS * \\ criterion(wrong_logits[1],", "################# Text to image task############################ # class condGANTrainer(object): def __init__(self,", "errG_total.item()) summary_KL = summary.scalar('KL_loss', kl_loss.item()) self.summary_writer.add_summary(summary_D, count) self.summary_writer.add_summary(summary_G, count) self.summary_writer.add_summary(summary_KL,", "self.inception_model, start_count = load_network(self.gpus) avg_param_G = copy_G_params(self.netG) self.optimizerG, self.optimizersD =", "0.999)) optimizersD.append(opt) # G_opt_paras = [] # for p in", "nn.init.orthogonal(m.weight.data, 1.0) if m.bias is not None: m.bias.data.fill_(0.0) def load_params(model,", "in range(images.size(0)): s_tmp = '%s/single_samples/%s/%s' %\\ (save_dir, split_dir, filenames[i]) folder", "\\ define_optimizers(self.netG, self.netsD) self.criterion = nn.BCELoss() self.SATcriterion = nn.CrossEntropyLoss() self.real_labels", "mkdir_p(self.image_dir) mkdir_p(self.log_dir) self.summary_writer = FileWriter(self.log_dir) s_gpus = cfg.GPU_ID.split(',') self.gpus =", "outputs = self.netsD[i](self.fake_imgs[i], mu) errG = criterion(outputs[0], real_labels) if len(outputs)", "print('predictions', predictions.shape) scores = [] for i in range(num_splits): istart", "0 flag = count % 100 batch_size = self.real_imgs[0].size(0) criterion,", "* self.num_gpus self.max_epoch = cfg.TRAIN.MAX_EPOCH self.snapshot_interval = cfg.TRAIN.SNAPSHOT_INTERVAL self.data_loader =", "os.path.join(data_folder, 'WORDMAP_' + data_name + '.json') with open(word_map_file, 'r') as", "1 * ((1. 
- alphas.sum(dim=1)) ** 2).mean() # Set zero_grad", "avg_param_G, netsD, epoch, model_dir): load_params(netG, avg_param_G) torch.save( netG.state_dict(), '%s/netG_%d.pth' %", "self.criterion = nn.BCELoss() self.SATcriterion = nn.CrossEntropyLoss() self.real_labels = Variable(torch.FloatTensor(self.batch_size).fill_(1)) self.fake_labels", "= errG + errG_patch errG_total = errG_total + errG if", "data in enumerate(self.data_loader, 0): for step, data in enumerate(zip(self.data_loader, train_loader),", "img_hat.transpose(1, 2) # batch_size * channel_num * channel_num covariance =", "count) return errD def train_Gnet(self, count): self.netG.zero_grad() errG_total = 0", "torch.mean(KLD_element).mul_(-0.5) return KLD def weights_init(m): classname = m.__class__.__name__ if classname.find('Conv')", "* num_pixels * channel_num img_hat_transpose = img_hat.transpose(1, 2) # batch_size", "####################################################### # (*) Update SAT network: ###################################################### # Update weights", "vutils import numpy as np import os import time from", "range(batch_size): s_tmp = '%s/super/%s/%s' %\\ (save_dir, split_dir, filenames[i]) folder =", "caps_sorted, decode_lengths, alphas, sort_ind = decoder(img, caps, caplens) targets =", "kl_loss = KL_loss(mu, logvar) * cfg.TRAIN.COEFF.KL errG_total = errG_total +", "and Inception score: ###################################################### pred = self.inception_model(self.fake_imgs[-1].detach()) predictions.append(pred.data.cpu().numpy()) if count", "summary.scalar('D_loss%d' % idx, errD.item()) self.summary_writer.add_summary(summary_D, count) return errD def train_Gnet(self,", "split_dir): if cfg.TRAIN.NET_G == '': print('Error: the path for morels", "netsD.append(D_NET256()) if cfg.TREE.BRANCH_NUM > 3: netsD.append(D_NET512()) if cfg.TREE.BRANCH_NUM > 4:", "in range(start_epoch, self.max_epoch): start_t = time.time() # for step, data", "step, data in enumerate(self.data_loader, 0): imgs, t_embeddings, filenames = data", "cfg.TRAIN.COEFF.UNCOND_LOSS > 0: errG_patch = cfg.TRAIN.COEFF.UNCOND_LOSS *\\ criterion(outputs[1], real_labels) errG", "5 * \\ nn.MSELoss()(covariance1, covariance2) errG_total = errG_total + like_mu2", "batch_size=self.batch_size, shuffle=True, num_workers=int(cfg.WORKERS), pin_memory=True) if cfg.CUDA: self.criterion.cuda() self.SATcriterion.cuda() # Compute", "condGANTrainer(object): def __init__(self, output_dir, data_loader, imsize): if cfg.TRAIN.FLAG: self.model_dir =", "criterion(outputs[1], real_labels) errG = errG + errG_patch errG_total = errG_total", "255] img = images[i].add(1).div(2).mul(255).clamp(0, 255).byte() ndarr = img.permute(1, 2, 0).data.cpu().numpy()", "else: vembedding = Variable(t_embedding) for i in range(self.num_Ds): if cfg.CUDA:", "time.time() # for step, data in enumerate(self.data_loader, 0): for step,", "self.summary_writer.add_summary(sum_mu, count) sum_cov = summary.scalar('G_like_cov1', like_cov1.item()) self.summary_writer.add_summary(sum_cov, count) kl_loss =", "= mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar) KLD = torch.mean(KLD_element).mul_(-0.5) return KLD def weights_init(m): classname", "# errD_real = criterion(real_logits[0], real_labels) errD_wrong = criterion(wrong_logits[0], fake_labels) errD_fake", "3, 256, 256] ####################################################### # (*) Forward fake images to", "(i.e., self.fake_imgs[i][0:num]) # is still [-1. 1]... 
vutils.save_image( fake_img.data, '%s/count_%09d_fake_samples_%d.png'", "* channel_num * 1 * 1 mu = img.mean(2, keepdim=True).mean(3,", "if cfg.TEST.B_EXAMPLE: # fake_img_list.append(fake_imgs[0].data.cpu()) # fake_img_list.append(fake_imgs[1].data.cpu()) fake_img_list.append(fake_imgs[2].data.cpu()) else: self.save_singleimages(fake_imgs[-1], filenames,", "###################################################### from SATmodels import Encoder, DecoderWithAttention from torch.nn.utils.rnn import pack_padded_sequence", "six.moves import range import torchvision.transforms as transforms import torch.backends.cudnn as", "real_img_set) summary_writer.add_summary(sup_real_img, count) for i in range(num_imgs): fake_img = fake_imgs[i][0:num]", "# netD.zero_grad() # Forward real_labels = self.real_labels[:batch_size] fake_labels = self.fake_labels[:batch_size]", "deepcopy from miscc.config import cfg from miscc.utils import mkdir_p from", "encoder.fine_tune(fine_tune_encoder) encoder_optimizer = torch.optim.Adam(params=filter(lambda p: p.requires_grad, encoder.parameters()), lr=1e-4) if fine_tune_encoder", "keepdim=True) # batch_size * channel_num * num_pixels img_hat = img", "= torch.nn.DataParallel(netG, device_ids=gpus) print(netG) netsD = [] if cfg.TREE.BRANCH_NUM >", "// num_splits part = predictions[istart:iend, :] kl = part *", "% 100 batch_size = self.real_imgs[0].size(0) criterion, mu = self.criterion, self.mu", "wrong_vimgs = [], [] if cfg.CUDA: vembedding = Variable(t_embedding).cuda() else:", "* predictions.shape[0] // num_splits iend = (i + 1) *", "import range import torchvision.transforms as transforms import torch.backends.cudnn as cudnn", "nn.init.orthogonal(m.weight.data, 1.0) elif classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) elif", "in range(num_splits): istart = i * predictions.shape[0] // num_splits iend", "if flag == 0: summary_D = summary.scalar('D_loss%d' % idx, errD.item())", "= torch.nn.DataParallel(netsD[i], device_ids=gpus) # print(netsD[i]) print('# of netsD', len(netsD)) count", "== '': print('Error: the path for morels is not found!')", "Compute color consistency losses if cfg.TRAIN.COEFF.COLOR_LOSS > 0: if self.num_Ds", "# is still [-1. 1]... 
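
# --- Illustrative sketch (not part of the original trainer) ----------------
# compute_inception_score() expects an (N, num_classes) array of softmax
# outputs (in the trainer these come from INCEPTION_V3 on the finest fake
# images); it splits the rows into `num_splits` chunks, computes
# exp(mean KL(p(y|x) || p(y))) per chunk, and returns the mean/std over
# chunks. negative_log_posterior_probability() instead averages
# -log(max_y p(y|x)). The call below uses random rows normalised to look like
# softmax outputs; the 500 x 1000 shape, the epsilon, and this helper's name
# are assumptions made only for the demonstration.
def _sketch_inception_score_usage():
    rng = np.random.RandomState(0)
    fake_softmax = rng.rand(500, 1000) + 1e-8        # strictly positive rows
    fake_softmax /= fake_softmax.sum(axis=1, keepdims=True)
    is_mean, is_std = compute_inception_score(fake_softmax, num_splits=10)
    nlpp_mean, nlpp_std = negative_log_posterior_probability(fake_softmax,
                                                             num_splits=10)
    print('IS: %.3f +/- %.3f, NLPP: %.3f +/- %.3f'
          % (is_mean, is_std, nlpp_mean, nlpp_std))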

# ################# Text to image task############################ #
class condGANTrainer(object):
    def __init__(self, output_dir, data_loader, imsize):
        if cfg.TRAIN.FLAG:
            self.model_dir = os.path.join(output_dir, 'Model')
            self.image_dir = os.path.join(output_dir, 'Image')
            self.log_dir = os.path.join(output_dir, 'Log')
            mkdir_p(self.model_dir)
            mkdir_p(self.image_dir)
            mkdir_p(self.log_dir)
            self.summary_writer = FileWriter(self.log_dir)

        s_gpus = cfg.GPU_ID.split(',')
        self.gpus = [int(ix) for ix in s_gpus]
        self.num_gpus = len(self.gpus)
        torch.cuda.set_device(self.gpus[0])
        cudnn.benchmark = True

        self.batch_size = cfg.TRAIN.BATCH_SIZE * self.num_gpus
        self.max_epoch = cfg.TRAIN.MAX_EPOCH
        self.snapshot_interval = cfg.TRAIN.SNAPSHOT_INTERVAL

        self.data_loader = data_loader
        self.num_batches = len(self.data_loader)

    def prepare_data(self, data):
        imgs, w_imgs, t_embedding, _ = data

        real_vimgs, wrong_vimgs = [], []
        if cfg.CUDA:
            vembedding = Variable(t_embedding).cuda()
        else:
            vembedding = Variable(t_embedding)
        for i in range(self.num_Ds):
            if cfg.CUDA:
                real_vimgs.append(Variable(imgs[i]).cuda())
                wrong_vimgs.append(Variable(w_imgs[i]).cuda())
            else:
                real_vimgs.append(Variable(imgs[i]))
                wrong_vimgs.append(Variable(w_imgs[i]))
        return imgs, real_vimgs, wrong_vimgs, vembedding

    def train_Dnet(self, idx, count):
        flag = count % 100
        batch_size = self.real_imgs[0].size(0)
        criterion, mu = self.criterion, self.mu

        netD, optD = self.netsD[idx], self.optimizersD[idx]
        real_imgs = self.real_imgs[idx]
        wrong_imgs = self.wrong_imgs[idx]
        fake_imgs = self.fake_imgs[idx]
        #
        netD.zero_grad()
        # Forward
        real_labels = self.real_labels[:batch_size]
        fake_labels = self.fake_labels[:batch_size]
        # for real
        real_logits = netD(real_imgs, mu.detach())
        wrong_logits = netD(wrong_imgs, mu.detach())
        fake_logits = netD(fake_imgs.detach(), mu.detach())
        #
        errD_real = criterion(real_logits[0], real_labels)
        errD_wrong = criterion(wrong_logits[0], fake_labels)
        errD_fake = criterion(fake_logits[0], fake_labels)
        if len(real_logits) > 1 and cfg.TRAIN.COEFF.UNCOND_LOSS > 0:
            errD_real_uncond = cfg.TRAIN.COEFF.UNCOND_LOSS * \
                criterion(real_logits[1], real_labels)
            errD_wrong_uncond = cfg.TRAIN.COEFF.UNCOND_LOSS * \
                criterion(wrong_logits[1], real_labels)
            errD_fake_uncond = cfg.TRAIN.COEFF.UNCOND_LOSS * \
                criterion(fake_logits[1], fake_labels)
            #
            errD_real = errD_real + errD_real_uncond
            errD_wrong = errD_wrong + errD_wrong_uncond
            errD_fake = errD_fake + errD_fake_uncond
            #
            errD = errD_real + errD_wrong + errD_fake
        else:
            errD = errD_real + 0.5 * (errD_wrong + errD_fake)
        # backward
        errD.backward()
        # update parameters
        optD.step()
        # log
        if flag == 0:
            summary_D = summary.scalar('D_loss%d' % idx, errD.item())
            self.summary_writer.add_summary(summary_D, count)
        return errD

    def train_Gnet(self, count):
        self.netG.zero_grad()
        errG_total = 0
        flag = count % 100
        batch_size = self.real_imgs[0].size(0)
        criterion, mu, logvar = self.criterion, self.mu, self.logvar
        real_labels = self.real_labels[:batch_size]
        for i in range(self.num_Ds):
            outputs = self.netsD[i](self.fake_imgs[i], mu)
            errG = criterion(outputs[0], real_labels)
            if len(outputs) > 1 and cfg.TRAIN.COEFF.UNCOND_LOSS > 0:
                errG_patch = cfg.TRAIN.COEFF.UNCOND_LOSS *\
                    criterion(outputs[1], real_labels)
                errG = errG + errG_patch
            errG_total = errG_total + errG
            if flag == 0:
                summary_D = summary.scalar('G_loss%d' % i, errG.item())
                self.summary_writer.add_summary(summary_D, count)

        # Compute color consistency losses
        if cfg.TRAIN.COEFF.COLOR_LOSS > 0:
            if self.num_Ds > 1:
                mu1, covariance1 = compute_mean_covariance(self.fake_imgs[-1])
                mu2, covariance2 = \
                    compute_mean_covariance(self.fake_imgs[-2].detach())
                like_mu2 = cfg.TRAIN.COEFF.COLOR_LOSS * nn.MSELoss()(mu1, mu2)
                like_cov2 = cfg.TRAIN.COEFF.COLOR_LOSS * 5 * \
                    nn.MSELoss()(covariance1, covariance2)
                errG_total = errG_total + like_mu2 + like_cov2
                if flag == 0:
                    sum_mu = summary.scalar('G_like_mu2', like_mu2.item())
                    self.summary_writer.add_summary(sum_mu, count)
                    sum_cov = summary.scalar('G_like_cov2', like_cov2.item())
                    self.summary_writer.add_summary(sum_cov, count)
            if self.num_Ds > 2:
                mu1, covariance1 = compute_mean_covariance(self.fake_imgs[-2])
                mu2, covariance2 = \
                    compute_mean_covariance(self.fake_imgs[-3].detach())
                like_mu1 = cfg.TRAIN.COEFF.COLOR_LOSS * nn.MSELoss()(mu1, mu2)
                like_cov1 = cfg.TRAIN.COEFF.COLOR_LOSS * 5 * \
                    nn.MSELoss()(covariance1, covariance2)
                errG_total = errG_total + like_mu1 + like_cov1
                if flag == 0:
                    sum_mu = summary.scalar('G_like_mu1', like_mu1.item())
                    self.summary_writer.add_summary(sum_mu, count)
                    sum_cov = summary.scalar('G_like_cov1', like_cov1.item())
                    self.summary_writer.add_summary(sum_cov, count)

        kl_loss = KL_loss(mu, logvar) * cfg.TRAIN.COEFF.KL
        errG_total = errG_total + kl_loss
        # Postpone the backward propagation
        # errG_total.backward()
        # self.optimizerG.step()
        return kl_loss, errG_total
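
    # --- Illustrative sketch (not part of the original trainer) -------------
    # train_Gnet() above adds a colour-consistency term between adjacent
    # generator branches: the channel means and channel covariances returned
    # by compute_mean_covariance() for a higher-resolution output are pulled
    # towards those of the next lower-resolution output, which is detached so
    # that only the finer branch is penalised. A minimal stand-alone version
    # on random batches; the shapes, the `color_coeff` value, and this
    # helper's name are assumptions (the real coefficient is
    # cfg.TRAIN.COEFF.COLOR_LOSS), and it assumes a PyTorch version where
    # Tensor and Variable are merged (>= 0.4).
    @staticmethod
    def _sketch_color_consistency(color_coeff=50.0):
        hi = torch.rand(4, 3, 128, 128)   # stands in for self.fake_imgs[-1]
        lo = torch.rand(4, 3, 64, 64)     # stands in for self.fake_imgs[-2]
        mu1, cov1 = compute_mean_covariance(hi)
        mu2, cov2 = compute_mean_covariance(lo.detach())
        like_mu = color_coeff * nn.MSELoss()(mu1, mu2)
        like_cov = color_coeff * 5 * nn.MSELoss()(cov1, cov2)
        return like_mu + like_cov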
*", "/ 2 fake_img_set = fake_img_set.astype(np.uint8) sup_fake_img = summary.image('fake_img%d' % i,", "fake images to SAT ###################################################### from SATmodels import Encoder, DecoderWithAttention", "1) * predictions.shape[0] // num_splits part = predictions[istart:iend, :] result", "mu, covariance def KL_loss(mu, logvar): # -0.5 * sum(1 +", "range of real_img (i.e., self.imgs_tcpu[i][0:num]) # is changed to [0,", "save_dir, split_dir, i, 128) # self.save_singleimages(fake_imgs[-3], filenames, # save_dir, split_dir,", "= netD(wrong_imgs, mu.detach()) fake_logits = netD(fake_imgs.detach(), mu.detach()) # errD_real =", "np import os import time from PIL import Image, ImageFont,", "count) errD_total += errD ####################################################### # (3) Update G network:", "1) self.fake_imgs, self.mu, self.logvar = \\ self.netG(noise, self.txt_embedding) # len(self.fake_imgs)", "images_list, filenames, save_dir, split_dir, imsize): batch_size = images_list[0].size(0) num_sentences =", "errG_total = self.train_Gnet(count) for p, avg_p in zip(self.netG.parameters(), avg_param_G): avg_p.mul_(0.999).add_(0.001,", "self.num_gpus self.max_epoch = cfg.TRAIN.MAX_EPOCH self.snapshot_interval = cfg.TRAIN.SNAPSHOT_INTERVAL self.data_loader = data_loader", "flag == 0: summary_D = summary.scalar('G_loss%d' % i, errG.item()) self.summary_writer.add_summary(summary_D,", "errD_wrong_uncond = cfg.TRAIN.COEFF.UNCOND_LOSS * \\ criterion(wrong_logits[1], real_labels) errD_fake_uncond = cfg.TRAIN.COEFF.UNCOND_LOSS", "2) # batch_size * channel_num * channel_num covariance = torch.bmm(img_hat,", "% (s_tmp, imsize, sentenceID) # range from [-1, 1] to", "\\ torch.load(cfg.TRAIN.NET_G, map_location=lambda storage, loc: storage) netG.load_state_dict(state_dict) print('Load ', cfg.TRAIN.NET_G)", "self.real_labels = Variable(torch.FloatTensor(self.batch_size).fill_(1)) self.fake_labels = Variable(torch.FloatTensor(self.batch_size).fill_(0)) self.gradient_one = torch.FloatTensor([1.0]) self.gradient_half", "losses if cfg.TRAIN.COEFF.COLOR_LOSS > 0: if self.num_Ds > 1: mu1,", "'r') as j: word_map = json.load(j) # Define the encoder/decoder", "criterion, mu, logvar = self.criterion, self.mu, self.logvar real_labels = self.real_labels[:batch_size]", "= len(netsD) for i in range(num_Ds): opt = optim.Adam(netsD[i].parameters(), lr=cfg.TRAIN.DISCRIMINATOR_LR,", "else: self.save_singleimages(fake_imgs[-1], filenames, save_dir, split_dir, i, 256) # self.save_singleimages(fake_imgs[-2], filenames,", "# batch_size * channel_num * num_pixels img_hat = img -", "images_list[0].size(0) num_sentences = len(images_list) for i in range(batch_size): s_tmp =", "fake_img_list.append(fake_imgs[0].data.cpu()) # fake_img_list.append(fake_imgs[1].data.cpu()) fake_img_list.append(fake_imgs[2].data.cpu()) else: self.save_singleimages(fake_imgs[-1], filenames, save_dir, split_dir, i,", "errG_total + like_mu2 + like_cov2 if flag == 0: sum_mu", "> 1 and cfg.TRAIN.COEFF.UNCOND_LOSS > 0: errG_patch = cfg.TRAIN.COEFF.UNCOND_LOSS *\\", "import torch.nn as nn from torch.autograd import Variable import torch.optim", "= [] for j in range(num_sentences): img = images_list[j][i] #", "start_epoch and step == 0: print ('Checking real samples at", "* predictions.shape[0] // num_splits part = predictions[istart:iend, :] kl =", "self.summary_writer.add_summary(sum_cov, count) if self.num_Ds > 2: mu1, covariance1 = compute_mean_covariance(self.fake_imgs[-2])", "3, 128, 128] # self.fake_imgs[2].shape = [batch_size, 3, 
256, 256]", "batch_size * channel_num * num_pixels img_hat = img - mu.expand_as(img)", "= np.concatenate(predictions, 0) mean, std = compute_inception_score(predictions, 10) # print('mean:',", "# Compute SATloss self.real_labels = self.real_labels.cuda() self.fake_labels = self.fake_labels.cuda() self.gradient_one", "!= '': state_dict = torch.load(cfg.TRAIN.NET_G) netG.load_state_dict(state_dict) print('Load ', cfg.TRAIN.NET_G) istart", "* \\ criterion(fake_logits[1], fake_labels) # errD_real = errD_real + errD_real_uncond", "'%s/netG_%d.pth' % (model_dir, epoch)) for i in range(len(netsD)): netD =", "nz)) if cfg.CUDA: netG.cuda() noise = noise.cuda() # switch to", "summary.scalar('G_like_cov1', like_cov1.item()) self.summary_writer.add_summary(sum_cov, count) kl_loss = KL_loss(mu, logvar) * cfg.TRAIN.COEFF.KL", "range(len(netsD)): netsD[i].cuda() inception_model = inception_model.cuda() inception_model.eval() return netG, netsD, len(netsD),", "DecoderWithAttention(attention_dim=512, embed_dim=512, decoder_dim=512, vocab_size=len(word_map), dropout=0.5).cuda() decoder_optimizer = torch.optim.Adam(params=filter(lambda p: p.requires_grad,", "channel_num * 1 * 1 mu = img.mean(2, keepdim=True).mean(3, keepdim=True)", "256] ####################################################### # (*) Forward fake images to SAT ######################################################", "as transforms import torch.backends.cudnn as cudnn import torch import torch.nn", "for i in range(self.num_Ds): errD = self.train_Dnet(i, count) errD_total +=", "KLD def weights_init(m): classname = m.__class__.__name__ if classname.find('Conv') != -1:", "= '%s/single_samples/%s/%s' %\\ (save_dir, split_dir, filenames[i]) folder = s_tmp[:s_tmp.rfind('/')] if", "negative_log_posterior_probability(predictions, 10) m_nlpp = summary.scalar('NLPP_mean', mean_nlpp) self.summary_writer.add_summary(m_nlpp, count) # predictions", "Image.fromarray(ndarr) im.save(fullpath) def evaluate(self, split_dir): if cfg.TRAIN.NET_G == '': print('Error:", "= encoder(self.fake_imgs[idx]) scores, caps_sorted, decode_lengths, alphas, sort_ind = decoder(img, caps,", "Variable(t_embeddings) # print(t_embeddings[:, 0, :], t_embeddings.size(1)) embedding_dim = t_embeddings.size(1) batch_size", "* 1 mu = img.mean(2, keepdim=True).mean(3, keepdim=True) # batch_size *", "int(s_tmp[istart:iend]) s_tmp = s_tmp[:s_tmp.rfind('/')] save_dir = '%s/iteration%d' % (s_tmp, iteration)", "(model_dir, i)) print('Save G/Ds models.') def save_real(imgs_tcpu, image_dir): num =", "real_imgs = self.real_imgs[idx] wrong_imgs = self.wrong_imgs[idx] fake_imgs = self.fake_imgs[idx] #", "= count % 100 batch_size = self.real_imgs[0].size(0) criterion, mu =", "def negative_log_posterior_probability(predictions, num_splits=1): # print('predictions', predictions.shape) scores = [] for", "print('''[%d/%d][%d] Loss_D: %.2f Loss_G: %.2f Loss_KL: %.2f Time: %.2fs '''", "= data real_vimgs, wrong_vimgs = [], [] if cfg.CUDA: vembedding", "mean) self.summary_writer.add_summary(m_incep, count) # mean_nlpp, std_nlpp = negative_log_posterior_probability(predictions, 10) m_nlpp", "like_mu2 = cfg.TRAIN.COEFF.COLOR_LOSS * nn.MSELoss()(mu1, mu2) like_cov2 = cfg.TRAIN.COEFF.COLOR_LOSS *", "# (1) Generate fake images ###################################################### noise.data.normal_(0, 1) self.fake_imgs, self.mu,", "is not found!') else: # Build and load the generator", "s_tmp.rfind('.') iteration = int(s_tmp[istart:iend]) s_tmp = s_tmp[:s_tmp.rfind('/')] save_dir = '%s/iteration%d'", "targets, _ 
= pack_padded_sequence(targets, decode_lengths, batch_first=True).cuda() SATloss += self.SATcriterion(scores, targets)", "savename, nrow=10, normalize=True) def save_singleimages(self, images, filenames, save_dir, split_dir, sentenceID,", "0).data.cpu().numpy() im = Image.fromarray(ndarr) im.save(fullpath) def evaluate(self, split_dir): if cfg.TRAIN.NET_G", "= 0 # Compute the SAT loss after forwarding the", "# predictions = [] end_t = time.time() print('''[%d/%d][%d] Loss_D: %.2f", "the encoder/decoder structure for SAT model decoder = DecoderWithAttention(attention_dim=512, embed_dim=512,", "Shared functions ################### def compute_mean_covariance(img): batch_size = img.size(0) channel_num =", "import cfg from miscc.utils import mkdir_p from CaptionDatasets import *", "num_workers=int(cfg.WORKERS), pin_memory=True) if cfg.CUDA: self.criterion.cuda() self.SATcriterion.cuda() # Compute SATloss self.real_labels", "i in range(images.size(0)): s_tmp = '%s/single_samples/%s/%s' %\\ (save_dir, split_dir, filenames[i])", "# len(self.fake_imgs) = NUM_BRANCHES # self.fake_imgs[0].shape = [batch_size, 3, 64,", "count) kl_loss = KL_loss(mu, logvar) * cfg.TRAIN.COEFF.KL errG_total = errG_total", "avg_param_G, self.netsD, count, self.model_dir) # Save images backup_para = copy_G_params(self.netG)", "image task############################ # class condGANTrainer(object): def __init__(self, output_dir, data_loader, imsize):", "opt = optim.Adam(netsD[i].parameters(), lr=cfg.TRAIN.DISCRIMINATOR_LR, betas=(0.5, 0.999)) optimizersD.append(opt) # G_opt_paras =", "mu1, covariance1 = compute_mean_covariance(self.fake_imgs[-1]) mu2, covariance2 = \\ compute_mean_covariance(self.fake_imgs[-2].detach()) like_mu2", "summary.image('fake_img%d' % i, fake_img_set) summary_writer.add_summary(sup_fake_img, count) summary_writer.flush() # ################# Text", "to [0, 255] img = images[i].add(1).div(2).mul(255).clamp(0, 255).byte() ndarr = img.permute(1,", "vutils.make_grid(fake_img.data).cpu().numpy() fake_img_set = np.transpose(fake_img_set, (1, 2, 0)) fake_img_set = (fake_img_set", "avg_param_G) # self.fake_imgs, _, _ = self.netG(fixed_noise, self.txt_embedding) save_img_results(self.imgs_tcpu, self.fake_imgs,", "kl_loss, errG_total = self.train_Gnet(count) for p, avg_p in zip(self.netG.parameters(), avg_param_G):", "= predictions[istart:iend, :] kl = part * \\ (np.log(part) -", "+ '.json') with open(word_map_file, 'r') as j: word_map = json.load(j)", "np.mean(result) scores.append(result) return np.mean(scores), np.std(scores) def load_network(gpus): netG = G_NET()", "\\ compute_mean_covariance(self.fake_imgs[-3].detach()) like_mu1 = cfg.TRAIN.COEFF.COLOR_LOSS * nn.MSELoss()(mu1, mu2) like_cov1 =", "= s_tmp.rfind('.') iteration = int(s_tmp[istart:iend]) s_tmp = s_tmp[:s_tmp.rfind('/')] save_dir =", "def evaluate(self, split_dir): if cfg.TRAIN.NET_G == '': print('Error: the path", "0) mean, std = compute_inception_score(predictions, 10) # print('mean:', mean, 'std',", "= summary.image('fake_img%d' % i, fake_img_set) summary_writer.add_summary(sup_fake_img, count) summary_writer.flush() # #################", "iteration) nz = cfg.GAN.Z_DIM noise = Variable(torch.FloatTensor(self.batch_size, nz)) if cfg.CUDA:", "Update SAT network: ###################################################### # Update weights decoder_optimizer.step() if encoder_optimizer", "enumerate(self.data_loader, 0): for step, data in enumerate(zip(self.data_loader, train_loader), 0): data_1", "# self.save_singleimages(fake_imgs[-2], filenames, # 
save_dir, split_dir, i, 128) # self.save_singleimages(fake_imgs[-3],", "def save_singleimages(self, images, filenames, save_dir, split_dir, sentenceID, imsize): for i", "optim.Adam(netsD[i].parameters(), lr=cfg.TRAIN.DISCRIMINATOR_LR, betas=(0.5, 0.999)) optimizersD.append(opt) # G_opt_paras = [] #", "sum_cov = summary.scalar('G_like_cov2', like_cov2.item()) self.summary_writer.add_summary(sum_cov, count) if self.num_Ds > 2:", "* width # batch_size * channel_num * 1 * 1", "# self.fake_imgs[0].shape = [batch_size, 3, 64, 64] # self.fake_imgs[1].shape =", "img = encoder(self.fake_imgs[idx]) scores, caps_sorted, decode_lengths, alphas, sort_ind = decoder(img,", "new_param): for p, new_p in zip(model.parameters(), new_param): p.data.copy_(new_p) def copy_G_params(model):", "= len(images_list) for i in range(batch_size): s_tmp = '%s/super/%s/%s' %\\", "= [] if cfg.TREE.BRANCH_NUM > 0: netsD.append(D_NET64()) if cfg.TREE.BRANCH_NUM >", "== 0: sum_mu = summary.scalar('G_like_mu1', like_mu1.item()) self.summary_writer.add_summary(sum_mu, count) sum_cov =", "to SAT ###################################################### from SATmodels import Encoder, DecoderWithAttention from torch.nn.utils.rnn", "= summary.scalar('G_loss', errG_total.item()) summary_KL = summary.scalar('KL_loss', kl_loss.item()) self.summary_writer.add_summary(summary_D, count) self.summary_writer.add_summary(summary_G,", "= img.mean(2, keepdim=True).mean(3, keepdim=True) # batch_size * channel_num * num_pixels", "super_img = torch.cat(super_img, 0) vutils.save_image(super_img, savename, nrow=10, normalize=True) def save_singleimages(self,", "= time.time() # for step, data in enumerate(self.data_loader, 0): for", "consistency losses if cfg.TRAIN.COEFF.COLOR_LOSS > 0: if self.num_Ds > 1:", "= summary.scalar('NLPP_mean', mean_nlpp) self.summary_writer.add_summary(m_nlpp, count) # predictions = [] end_t", "def load_params(model, new_param): for p, new_p in zip(model.parameters(), new_param): p.data.copy_(new_p)", "mu2) like_cov2 = cfg.TRAIN.COEFF.COLOR_LOSS * 5 * \\ nn.MSELoss()(covariance1, covariance2)", "errG.item()) self.summary_writer.add_summary(summary_D, count) # Compute color consistency losses if cfg.TRAIN.COEFF.COLOR_LOSS", "sum_mu = summary.scalar('G_like_mu1', like_mu1.item()) self.summary_writer.add_summary(sum_mu, count) sum_cov = summary.scalar('G_like_cov1', like_cov1.item())", "inception score if len(predictions) > 500: predictions = np.concatenate(predictions, 0)", "filenames, # save_dir, split_dir, i, 64) # break if cfg.TEST.B_EXAMPLE:", "= cfg.TRAIN.VIS_COUNT # The range of real_img (i.e., self.imgs_tcpu[i][0:num]) #", "count) summary_writer.flush() # ################# Text to image task############################ # class", "INCEPTION_V3() if cfg.CUDA: netG.cuda() for i in range(len(netsD)): netsD[i].cuda() inception_model", "cfg.CUDA: netG.cuda() noise = noise.cuda() # switch to evaluate mode", "= len(self.data_loader) def prepare_data(self, data): imgs, w_imgs, t_embedding, _ =", "lr=cfg.TRAIN.DISCRIMINATOR_LR, betas=(0.5, 0.999)) optimizersD.append(opt) # G_opt_paras = [] # for", "criterion(wrong_logits[0], fake_labels) errD_fake = criterion(fake_logits[0], fake_labels) if len(real_logits) > 1", "__future__ import print_function from six.moves import range import torchvision.transforms as", "== 'test': split_dir = 'valid' netG = G_NET() netG.apply(weights_init) netG", "data_name, 'TRAIN', transform=transforms.Compose([normalize])), batch_size=self.batch_size, shuffle=True, num_workers=int(cfg.WORKERS), 
pin_memory=True) if cfg.CUDA: self.criterion.cuda()", "t_embeddings, filenames = data if cfg.CUDA: t_embeddings = Variable(t_embeddings).cuda() else:", "Tell Dataloader train_loader = torch.utils.data.DataLoader( CaptionDataset(data_folder, data_name, 'TRAIN', transform=transforms.Compose([normalize])), batch_size=self.batch_size,", "SAT model decoder = DecoderWithAttention(attention_dim=512, embed_dim=512, decoder_dim=512, vocab_size=len(word_map), dropout=0.5).cuda() decoder_optimizer", "fake_img_set = np.transpose(fake_img_set, (1, 2, 0)) fake_img_set = (fake_img_set +", "= int(count) + 1 if cfg.TRAIN.NET_D != '': for i", "encoder_optimizer.zero_grad() ####################################################### # (2) Update D network ###################################################### errD_total =", "(2) Update D network ###################################################### errD_total = 0 for i", "if cfg.TREE.BRANCH_NUM > 2: netsD.append(D_NET256()) if cfg.TREE.BRANCH_NUM > 3: netsD.append(D_NET512())", "summary_writer.add_summary(sup_real_img, count) for i in range(num_imgs): fake_img = fake_imgs[i][0:num] #", "> 500: predictions = np.concatenate(predictions, 0) mean, std = compute_inception_score(predictions,", "+ errG if flag == 0: summary_D = summary.scalar('G_loss%d' %", "json.load(j) # Define the encoder/decoder structure for SAT model decoder", "= torch.bmm(img_hat, img_hat_transpose) covariance = covariance / num_pixels return mu,", "fake_imgs = self.fake_imgs[idx] # netD.zero_grad() # Forward real_labels = self.real_labels[:batch_size]", "# Data parameters data_folder = 'birds_output' # folder with data", "decoder(img, caps, caplens) targets = caps_sorted[:, 1:] scores, _ =", "kl_loss.item()) self.summary_writer.add_summary(summary_D, count) self.summary_writer.add_summary(summary_G, count) self.summary_writer.add_summary(summary_KL, count) count += 1", "= 0 for i in range(self.num_Ds): errD = self.train_Dnet(i, count)", "(1, 2, 0)) fake_img_set = (fake_img_set + 1) * 255", "per SNAPSHOT_INTERVAL: ###################################################### if count % cfg.TRAIN.SNAPSHOT_INTERVAL == 0: save_model(self.netG,", "summary.scalar('Inception_mean', mean) self.summary_writer.add_summary(m_incep, count) # mean_nlpp, std_nlpp = negative_log_posterior_probability(predictions, 10)", "p: p.requires_grad, encoder.parameters()), lr=1e-4) if fine_tune_encoder else None SATloss =", "keepdim=True).mean(3, keepdim=True) # batch_size * channel_num * num_pixels img_hat =", "self.max_epoch): start_t = time.time() # for step, data in enumerate(self.data_loader,", "= G_NET() netG.apply(weights_init) netG = torch.nn.DataParallel(netG, device_ids=gpus) print(netG) netsD =", "summary from tensorboard import FileWriter from model import G_NET, D_NET64,", "not None: m.bias.data.fill_(0.0) def load_params(model, new_param): for p, new_p in", "cfg.TRAIN.SNAPSHOT_INTERVAL self.data_loader = data_loader self.num_batches = len(self.data_loader) def prepare_data(self, data):", "+ 1) * predictions.shape[0] // num_splits part = predictions[istart:iend, :]", "errD_wrong + errD_fake else: errD = errD_real + 0.5 *", "iteration = int(s_tmp[istart:iend]) s_tmp = s_tmp[:s_tmp.rfind('/')] save_dir = '%s/iteration%d' %", "# D(real): %.4f D(wrong):%.4f D(fake) %.4f % (epoch, self.max_epoch, self.num_batches,", "self.criterion, self.mu, self.logvar real_labels = self.real_labels[:batch_size] for i in range(self.num_Ds):", "errD_real_uncond errD_wrong = errD_wrong + errD_wrong_uncond errD_fake = errD_fake +", 
"images_list[j][i] # print(img.size()) img = img.view(1, 3, imsize, imsize) #", "epoch)) for i in range(len(netsD)): netD = netsD[i] torch.save( netD.state_dict(),", "if fine_tune_encoder else None SATloss = 0 # Compute the", "self.SATcriterion = nn.CrossEntropyLoss() self.real_labels = Variable(torch.FloatTensor(self.batch_size).fill_(1)) self.fake_labels = Variable(torch.FloatTensor(self.batch_size).fill_(0)) self.gradient_one", "cfg from miscc.utils import mkdir_p from CaptionDatasets import * from", "####################################################### # (1) Generate fake images ###################################################### noise.data.normal_(0, 1) self.fake_imgs,", "###################################################### if count % cfg.TRAIN.SNAPSHOT_INTERVAL == 0: save_model(self.netG, avg_param_G, self.netsD,", "-1: nn.init.orthogonal(m.weight.data, 1.0) if m.bias is not None: m.bias.data.fill_(0.0) def", "(save_dir, split_dir, filenames[i]) folder = s_tmp[:s_tmp.rfind('/')] if not os.path.isdir(folder): print('Make", "D_NET256, D_NET512, D_NET1024, INCEPTION_V3 # ################## Shared functions ################### def", "start_epoch = start_count // (self.num_batches) for epoch in range(start_epoch, self.max_epoch):", "SATloss errG_total.backward() self.optimizerG.step() ####################################################### # (*) Update SAT network: ######################################################", "= \\ compute_mean_covariance(self.fake_imgs[-3].detach()) like_mu1 = cfg.TRAIN.COEFF.COLOR_LOSS * nn.MSELoss()(mu1, mu2) like_cov1", "imgs[0].size(0) noise.data.resize_(batch_size, nz) noise.data.normal_(0, 1) fake_img_list = [] for i", "[], [] if cfg.CUDA: vembedding = Variable(t_embedding).cuda() else: vembedding =", "img.mean(2, keepdim=True).mean(3, keepdim=True) # batch_size * channel_num * num_pixels img_hat", "'%s/count_%09d_fake_samples_%d.png' % (image_dir, count, i), normalize=True) fake_img_set = vutils.make_grid(fake_img.data).cpu().numpy() fake_img_set", "in netG.parameters(): # if p.requires_grad: # G_opt_paras.append(p) optimizerG = optim.Adam(netG.parameters(),", "import pack_padded_sequence fine_tune_encoder = False # Read word map word_map_file", "save_singleimages(self, images, filenames, save_dir, split_dir, sentenceID, imsize): for i in", "files saved by create_input_files.py data_name = 'CUB_5_cap_per_img_5_min_word_freq' # base name", "for i in range(images.size(0)): s_tmp = '%s/single_samples/%s/%s' %\\ (save_dir, split_dir,", "np.mean(scores), np.std(scores) def negative_log_posterior_probability(predictions, num_splits=1): # print('predictions', predictions.shape) scores =", "encoder_optimizer.step() ####################################################### # (*) Prediction and Inception score: ###################################################### pred", "255).byte() ndarr = img.permute(1, 2, 0).data.cpu().numpy() im = Image.fromarray(ndarr) im.save(fullpath)", "return flatten def compute_inception_score(predictions, num_splits=1): # print('predictions', predictions.shape) scores =", "netsD[i] torch.save( netD.state_dict(), '%s/netD%d.pth' % (model_dir, i)) print('Save G/Ds models.')", "backup_para) # Compute inception score if len(predictions) > 500: predictions", "= np.mean(result) scores.append(result) return np.mean(scores), np.std(scores) def load_network(gpus): netG =", "save_dir, split_dir, imsize): batch_size = images_list[0].size(0) num_sentences = len(images_list) for", "summary_writer.add_summary(sup_fake_img, count) 
summary_writer.flush() # ################# Text to image task############################ #", "os.path.join(output_dir, 'Log') mkdir_p(self.model_dir) mkdir_p(self.image_dir) mkdir_p(self.log_dir) self.summary_writer = FileWriter(self.log_dir) s_gpus =", "1) * predictions.shape[0] // num_splits part = predictions[istart:iend, :] kl", "normalize=True) fake_img_set = vutils.make_grid(fake_img.data).cpu().numpy() fake_img_set = np.transpose(fake_img_set, (1, 2, 0))", "summary.scalar('G_loss%d' % i, errG.item()) self.summary_writer.add_summary(summary_D, count) # Compute color consistency", "2: mu1, covariance1 = compute_mean_covariance(self.fake_imgs[-2]) mu2, covariance2 = \\ compute_mean_covariance(self.fake_imgs[-3].detach())", "noise.data.normal_(0, 1) self.fake_imgs, self.mu, self.logvar = \\ self.netG(noise, self.txt_embedding) #", "as nn from torch.autograd import Variable import torch.optim as optim", "tensorboard import summary from tensorboard import FileWriter from model import", "= nn.BCELoss() self.SATcriterion = nn.CrossEntropyLoss() self.real_labels = Variable(torch.FloatTensor(self.batch_size).fill_(1)) self.fake_labels =", "pack_padded_sequence(scores, decode_lengths, batch_first=True).cuda() targets, _ = pack_padded_sequence(targets, decode_lengths, batch_first=True).cuda() SATloss", "= real_img_set.astype(np.uint8) sup_real_img = summary.image('real_img', real_img_set) summary_writer.add_summary(sup_real_img, count) for i", "data[0] _, caps, caplens = data[1] data = data_1 #######################################################", "= \\ torch.load(cfg.TRAIN.NET_G, map_location=lambda storage, loc: storage) netG.load_state_dict(state_dict) print('Load ',", "* 1 * 1 mu = img.mean(2, keepdim=True).mean(3, keepdim=True) #", "Update weights decoder_optimizer.step() if encoder_optimizer is not None: encoder_optimizer.step() #######################################################", "range(embedding_dim): fake_imgs, _, _ = netG(noise, t_embeddings[:, i, :]) if", "= 0 flag = count % 100 batch_size = self.real_imgs[0].size(0)", "i, fake_img_set) summary_writer.add_summary(sup_fake_img, count) summary_writer.flush() # ################# Text to image", "model.parameters())) return flatten def compute_inception_score(predictions, num_splits=1): # print('predictions', predictions.shape) scores", "1)) # Data parameters data_folder = 'birds_output' # folder with", "# print(t_embeddings[:, 0, :], t_embeddings.size(1)) embedding_dim = t_embeddings.size(1) batch_size =", "p.requires_grad, encoder.parameters()), lr=1e-4) if fine_tune_encoder else None SATloss = 0", "1 ####################################################### # (*) Save Images/Log/Model per SNAPSHOT_INTERVAL: ###################################################### if", "betas=(0.5, 0.999)) return optimizerG, optimizersD def save_model(netG, avg_param_G, netsD, epoch,", "self.netsD, count, self.model_dir) self.summary_writer.close() def save_superimages(self, images_list, filenames, save_dir, split_dir,", "return imgs, real_vimgs, wrong_vimgs, vembedding def train_Dnet(self, idx, count): flag", "# for p in netG.parameters(): # if p.requires_grad: # G_opt_paras.append(p)", "* nn.MSELoss()(mu1, mu2) like_cov2 = cfg.TRAIN.COEFF.COLOR_LOSS * 5 * \\", "optimizersD def save_model(netG, avg_param_G, netsD, epoch, model_dir): load_params(netG, avg_param_G) torch.save(", "real_labels) errD_wrong = criterion(wrong_logits[0], fake_labels) errD_fake = criterion(fake_logits[0], fake_labels) if", "* ((1. 
- alphas.sum(dim=1)) ** 2).mean() # Set zero_grad for", "128] # self.fake_imgs[2].shape = [batch_size, 3, 256, 256] ####################################################### #", "errD_fake else: errD = errD_real + 0.5 * (errD_wrong +", "for i in range(embedding_dim): fake_imgs, _, _ = netG(noise, t_embeddings[:,", "real_img_set.astype(np.uint8) sup_real_img = summary.image('real_img', real_img_set) summary_writer.add_summary(sup_real_img, count) for i in", "None: encoder_optimizer.zero_grad() ####################################################### # (2) Update D network ###################################################### errD_total", "cfg.TRAIN.COEFF.COLOR_LOSS * nn.MSELoss()(mu1, mu2) like_cov2 = cfg.TRAIN.COEFF.COLOR_LOSS * 5 *", "Images/Log/Model per SNAPSHOT_INTERVAL: ###################################################### if count % cfg.TRAIN.SNAPSHOT_INTERVAL == 0:", "self.SATcriterion.cuda() # Compute SATloss self.real_labels = self.real_labels.cuda() self.fake_labels = self.fake_labels.cuda()", "D network ###################################################### errD_total = 0 for i in range(self.num_Ds):", "train_loader), 0): data_1 = data[0] _, caps, caplens = data[1]", "self.model_dir = os.path.join(output_dir, 'Model') self.image_dir = os.path.join(output_dir, 'Image') self.log_dir =", "in range(len(netsD)): print('Load %s_%d.pth' % (cfg.TRAIN.NET_D, i)) state_dict = torch.load('%s%d.pth'", "count, self.image_dir, self.summary_writer) # load_params(self.netG, backup_para) # Compute inception score", "G_opt_paras = [] # for p in netG.parameters(): # if", "copy_G_params(self.netG) load_params(self.netG, avg_param_G) # self.fake_imgs, _, _ = self.netG(fixed_noise, self.txt_embedding)", "w_imgs, t_embedding, _ = data real_vimgs, wrong_vimgs = [], []", "idx, count): flag = count % 100 batch_size = self.real_imgs[0].size(0)", "self.num_Ds > 1: mu1, covariance1 = compute_mean_covariance(self.fake_imgs[-1]) mu2, covariance2 =", "# break super_img = torch.cat(super_img, 0) vutils.save_image(super_img, savename, nrow=10, normalize=True)", "% i, fake_img_set) summary_writer.add_summary(sup_fake_img, count) summary_writer.flush() # ################# Text to", "= 0 if cfg.TRAIN.NET_G != '': state_dict = torch.load(cfg.TRAIN.NET_G) netG.load_state_dict(state_dict)", "Dataloader train_loader = torch.utils.data.DataLoader( CaptionDataset(data_folder, data_name, 'TRAIN', transform=transforms.Compose([normalize])), batch_size=self.batch_size, shuffle=True,", "% (image_dir, count, i), normalize=True) fake_img_set = vutils.make_grid(fake_img.data).cpu().numpy() fake_img_set =", "vembedding = Variable(t_embedding).cuda() else: vembedding = Variable(t_embedding) for i in", "', folder) mkdir_p(folder) fullpath = '%s_%d_sentence%d.png' % (s_tmp, imsize, sentenceID)", "Forward fake images to SAT ###################################################### from SATmodels import Encoder,", "encoder.parameters()), lr=1e-4) if fine_tune_encoder else None SATloss = 0 #", "save_dir, split_dir, 64) # self.save_superimages(fake_img_list, filenames, # save_dir, split_dir, 128)", "self.gpus = [int(ix) for ix in s_gpus] self.num_gpus = len(self.gpus)", "_ = data real_vimgs, wrong_vimgs = [], [] if cfg.CUDA:", "real_img_set = np.transpose(real_img_set, (1, 2, 0)) real_img_set = real_img_set *", "= cfg.TRAIN.NET_G[istart:iend] count = int(count) + 1 if cfg.TRAIN.NET_D !=", "for morels is not found!') else: # Build and load", "sum_mu = summary.scalar('G_like_mu2', like_mu2.item()) self.summary_writer.add_summary(sum_mu, 
count) sum_cov = summary.scalar('G_like_cov2', like_cov2.item())", "cfg.TRAIN.VIS_COUNT # The range of real_img (i.e., self.imgs_tcpu[i][0:num]) # is", "then back propagation errG_total += SATloss errG_total.backward() self.optimizerG.step() ####################################################### #", "self.max_epoch = cfg.TRAIN.MAX_EPOCH self.snapshot_interval = cfg.TRAIN.SNAPSHOT_INTERVAL self.data_loader = data_loader self.num_batches", "vocab_size=len(word_map), dropout=0.5).cuda() decoder_optimizer = torch.optim.Adam(params=filter(lambda p: p.requires_grad, decoder.parameters()), lr=4e-4) encoder", "like_mu1 + like_cov1 if flag == 0: sum_mu = summary.scalar('G_like_mu1',", "1: mu1, covariance1 = compute_mean_covariance(self.fake_imgs[-1]) mu2, covariance2 = \\ compute_mean_covariance(self.fake_imgs[-2].detach())", "os.path.isdir(folder): print('Make a new folder: ', folder) mkdir_p(folder) fullpath =", "Loss_KL: %.2f Time: %.2fs ''' # D(real): %.4f D(wrong):%.4f D(fake)", "= errG_total + errG if flag == 0: summary_D =", "network ###################################################### errD_total = 0 for i in range(self.num_Ds): errD", "word_map_file = os.path.join(data_folder, 'WORDMAP_' + data_name + '.json') with open(word_map_file,", "filenames, save_dir, split_dir, sentenceID, imsize): for i in range(images.size(0)): s_tmp", "im.save(fullpath) def evaluate(self, split_dir): if cfg.TRAIN.NET_G == '': print('Error: the", "% cfg.TRAIN.SNAPSHOT_INTERVAL == 0: save_model(self.netG, avg_param_G, self.netsD, count, self.model_dir) #", "= Variable(torch.FloatTensor(self.batch_size, nz)) if cfg.CUDA: netG.cuda() noise = noise.cuda() #", "os.path.join(output_dir, 'Image') self.log_dir = os.path.join(output_dir, 'Log') mkdir_p(self.model_dir) mkdir_p(self.image_dir) mkdir_p(self.log_dir) self.summary_writer", "shuffle=True, num_workers=int(cfg.WORKERS), pin_memory=True) if cfg.CUDA: self.criterion.cuda() self.SATcriterion.cuda() # Compute SATloss", "img.size(2) width = img.size(3) num_pixels = height * width #", "self.summary_writer.close() def save_superimages(self, images_list, filenames, save_dir, split_dir, imsize): batch_size =", "= Variable(t_embedding).cuda() else: vembedding = Variable(t_embedding) for i in range(self.num_Ds):", "image_dir, summary_writer): num = cfg.TRAIN.VIS_COUNT # The range of real_img", "# ################## Shared functions ################### def compute_mean_covariance(img): batch_size = img.size(0)", "self.optimizerG.step() ####################################################### # (*) Update SAT network: ###################################################### # Update", "log(D(G(z))) ###################################################### kl_loss, errG_total = self.train_Gnet(count) for p, avg_p in", "# print(img.size()) super_img.append(img) # break super_img = torch.cat(super_img, 0) vutils.save_image(super_img,", "== 0: summary_D = summary.scalar('D_loss%d' % idx, errD.item()) self.summary_writer.add_summary(summary_D, count)", "netD, optD = self.netsD[idx], self.optimizersD[idx] real_imgs = self.real_imgs[idx] wrong_imgs =", "fine_tune_encoder = False # Read word map word_map_file = os.path.join(data_folder,", "decoder_optimizer.step() if encoder_optimizer is not None: encoder_optimizer.step() ####################################################### # (*)", "sup_real_img = summary.image('real_img', real_img_set) def save_img_results(imgs_tcpu, fake_imgs, num_imgs, count, image_dir,", "- np.log(np.expand_dims(np.mean(part, 0), 0))) kl = np.mean(np.sum(kl, 1)) 
scores.append(np.exp(kl)) return", "mean_nlpp, std_nlpp = negative_log_posterior_probability(predictions, 10) m_nlpp = summary.scalar('NLPP_mean', mean_nlpp) self.summary_writer.add_summary(m_nlpp,", "imsize, imsize) # print(img.size()) super_img.append(img) # break super_img = torch.cat(super_img,", "= summary.image('real_img', real_img_set) summary_writer.add_summary(sup_real_img, count) for i in range(num_imgs): fake_img", "m.__class__.__name__ if classname.find('Conv') != -1: nn.init.orthogonal(m.weight.data, 1.0) elif classname.find('BatchNorm') !=", "', folder) mkdir_p(folder) # savename = '%s_%d.png' % (s_tmp, imsize)", "% (cfg.TRAIN.NET_D, i)) state_dict = torch.load('%s%d.pth' % (cfg.TRAIN.NET_D, i)) netsD[i].load_state_dict(state_dict)", "split_dir, imsize): batch_size = images_list[0].size(0) num_sentences = len(images_list) for i", "+ log(sigma^2) - mu^2 - sigma^2) KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar) KLD", "self.netsD[i](self.fake_imgs[i], mu) errG = criterion(outputs[0], real_labels) if len(outputs) > 1", "[] count = start_count start_epoch = start_count // (self.num_batches) for", "0: print ('Checking real samples at first...') save_real(self.imgs_tcpu, self.image_dir) #######################################################", "G network: maximize log(D(G(z))) ###################################################### kl_loss, errG_total = self.train_Gnet(count) for", "os.path.join(output_dir, 'Model') self.image_dir = os.path.join(output_dir, 'Image') self.log_dir = os.path.join(output_dir, 'Log')", "+ errD_real_uncond errD_wrong = errD_wrong + errD_wrong_uncond errD_fake = errD_fake", "netG.apply(weights_init) netG = torch.nn.DataParallel(netG, device_ids=self.gpus) print(netG) # state_dict = torch.load(cfg.TRAIN.NET_G)", "to save generated images s_tmp = cfg.TRAIN.NET_G istart = s_tmp.rfind('_')", "maximize log(D(G(z))) ###################################################### kl_loss, errG_total = self.train_Gnet(count) for p, avg_p", "CaptionDataset(data_folder, data_name, 'TRAIN', transform=transforms.Compose([normalize])), batch_size=self.batch_size, shuffle=True, num_workers=int(cfg.WORKERS), pin_memory=True) if cfg.CUDA:", "encoder_optimizer is not None: encoder_optimizer.step() ####################################################### # (*) Prediction and", "= torch.optim.Adam(params=filter(lambda p: p.requires_grad, decoder.parameters()), lr=4e-4) encoder = Encoder().cuda() encoder.fine_tune(fine_tune_encoder)", "self.fake_imgs[0].shape = [batch_size, 3, 64, 64] # self.fake_imgs[1].shape = [batch_size,", "flag = count % 100 batch_size = self.real_imgs[0].size(0) criterion, mu", "range(len(netsD)): netD = netsD[i] torch.save( netD.state_dict(), '%s/netD%d.pth' % (model_dir, i))", "for step, data in enumerate(self.data_loader, 0): for step, data in", "self.fake_imgs[1].shape = [batch_size, 3, 128, 128] # self.fake_imgs[2].shape = [batch_size,", "+= 1 ####################################################### # (*) Save Images/Log/Model per SNAPSHOT_INTERVAL: ######################################################", "if cfg.TRAIN.NET_G == '': print('Error: the path for morels is", "= [], [] if cfg.CUDA: vembedding = Variable(t_embedding).cuda() else: vembedding", "= int(s_tmp[istart:iend]) s_tmp = s_tmp[:s_tmp.rfind('/')] save_dir = '%s/iteration%d' % (s_tmp,", "nn.MSELoss()(mu1, mu2) like_cov2 = cfg.TRAIN.COEFF.COLOR_LOSS * 5 * \\ nn.MSELoss()(covariance1,", "####################################################### # (*) Save Images/Log/Model 
per SNAPSHOT_INTERVAL: ###################################################### if count", "torch.load(cfg.TRAIN.NET_G, map_location=lambda storage, loc: storage) netG.load_state_dict(state_dict) print('Load ', cfg.TRAIN.NET_G) #", "self.train_Gnet(count) for p, avg_p in zip(self.netG.parameters(), avg_param_G): avg_p.mul_(0.999).add_(0.001, p.data) #", "import FileWriter from model import G_NET, D_NET64, D_NET128, D_NET256, D_NET512,", "len(self.fake_imgs) = NUM_BRANCHES # self.fake_imgs[0].shape = [batch_size, 3, 64, 64]", "zero_grad for encoder/decoder decoder_optimizer.zero_grad() if encoder_optimizer is not None: encoder_optimizer.zero_grad()", "# print('predictions', predictions.shape) scores = [] for i in range(num_splits):", "im = Image.fromarray(ndarr) im.save(fullpath) def evaluate(self, split_dir): if cfg.TRAIN.NET_G ==", "ImageDraw from copy import deepcopy from miscc.config import cfg from", "images to SAT ###################################################### from SATmodels import Encoder, DecoderWithAttention from", "self.summary_writer = FileWriter(self.log_dir) s_gpus = cfg.GPU_ID.split(',') self.gpus = [int(ix) for", "cfg.TRAIN.COEFF.COLOR_LOSS * 5 * \\ nn.MSELoss()(covariance1, covariance2) errG_total = errG_total", "3, 64, 64] # self.fake_imgs[1].shape = [batch_size, 3, 128, 128]", "in model.parameters())) return flatten def compute_inception_score(predictions, num_splits=1): # print('predictions', predictions.shape)", "(self.num_batches) for epoch in range(start_epoch, self.max_epoch): start_t = time.time() #", "5: for i in range(len(netsD)): netsD[i].apply(weights_init) netsD[i] = torch.nn.DataParallel(netsD[i], device_ids=gpus)", "= compute_inception_score(predictions, 10) # print('mean:', mean, 'std', std) m_incep =", "p, new_p in zip(model.parameters(), new_param): p.data.copy_(new_p) def copy_G_params(model): flatten =", "netG, netsD, len(netsD), inception_model, count def define_optimizers(netG, netsD): optimizersD =", "return optimizerG, optimizersD def save_model(netG, avg_param_G, netsD, epoch, model_dir): load_params(netG,", "in range(len(netsD)): netsD[i].cuda() inception_model = inception_model.cuda() inception_model.eval() return netG, netsD,", "cfg.TRAIN.COEFF.KL errG_total = errG_total + kl_loss # Postpone the backward", "self.save_superimages(fake_img_list, filenames, # save_dir, split_dir, 64) # self.save_superimages(fake_img_list, filenames, #", "compute_mean_covariance(self.fake_imgs[-1]) mu2, covariance2 = \\ compute_mean_covariance(self.fake_imgs[-2].detach()) like_mu2 = cfg.TRAIN.COEFF.COLOR_LOSS *", "if cfg.CUDA: self.criterion.cuda() self.SATcriterion.cuda() # Compute SATloss self.real_labels = self.real_labels.cuda()", "fixed_noise.cuda() predictions = [] count = start_count start_epoch = start_count", "i in range(self.num_Ds): errD = self.train_Dnet(i, count) errD_total += errD", "split_dir, i, 128) # self.save_singleimages(fake_imgs[-3], filenames, # save_dir, split_dir, i,", "real samples if epoch == start_epoch and step == 0:" ]
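The overlapping n-grams in the list above were sliced from a StackGAN-style PyTorch trainer (a text-to-image GAN with an added Show-Attend-and-Tell captioning branch). To make the fragmented algorithm easier to follow, here is a minimal sketch of the pieces the fragments spell out most completely: the per-image channel mean/covariance statistics, the color-consistency term between generator branches, and the conditioning-augmentation KL term. The body of compute_mean_covariance and the KLD_element line are taken directly from the fragments; the final KL reduction, the standalone color_consistency_loss wrapper, and its coeff default are assumptions added only for illustration.

import torch
import torch.nn as nn

def compute_mean_covariance(img):
    # img: (batch, channels, height, width)
    batch_size, channel_num, height, width = img.size()
    num_pixels = height * width
    # (batch, channels, 1, 1): per-image channel means
    mu = img.mean(2, keepdim=True).mean(3, keepdim=True)
    # (batch, channels, num_pixels): centered pixel values
    img_hat = (img - mu.expand_as(img)).view(batch_size, channel_num, num_pixels)
    # (batch, channels, channels): channel covariance
    covariance = torch.bmm(img_hat, img_hat.transpose(1, 2)) / num_pixels
    return mu, covariance

def KL_loss(mu, logvar):
    # -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
    KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar)
    # Reduction assumed (mean over elements); the exact reduction is not visible in the fragments.
    return torch.mean(KLD_element).mul_(-0.5)

def color_consistency_loss(fake_hi, fake_lo, coeff=50.0):
    # Hypothetical wrapper around the fragments' loss terms: the mean term is weighted by
    # cfg.TRAIN.COEFF.COLOR_LOSS and the covariance term by 5x that coefficient.
    mu1, cov1 = compute_mean_covariance(fake_hi)
    mu2, cov2 = compute_mean_covariance(fake_lo.detach())
    like_mu = coeff * nn.MSELoss()(mu1, mu2)
    like_cov = coeff * 5 * nn.MSELoss()(cov1, cov2)
    return like_mu + like_cov

In the fragments' training loop these terms are simply accumulated into errG_total (adding kl_loss and then the like_mu/like_cov pair for each adjacent branch) before errG_total.backward() and the generator optimizer step.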
[ "return template(\"error\", sporocilo=\"Stran ne obstaja!\", naslov=\"404\") @bottle.error(500) def napaka500(a): return", "slovar_kolicin=slovar_kolicin) @bottle.error(404) def napaka404(a): return template(\"error\", sporocilo=\"Stran ne obstaja!\", naslov=\"404\")", "dobi_parcele_za_prikaz, dobi_info_parcele, dodaj_gosta_na_rezervacijo, naredi_rezervacijo, dobi_rezervacijo_po_id, zakljuci_na_datum_in_placaj, dobi_postavke_racuna import datetime as", "return template(\"error\", sporocilo=\"Rezervacija ne obstaja!\", naslov=\"Napaka\") today = dt.date.today() gostje", "template(\"racun\", id_rezervacije=id_rezervacije, sestevek=format(sestevek, '.2f'), gostje=gostje, today=today.strftime(\"%d/%m/%Y\"), slovar_cen=slovar_cen, slovar_kolicin=slovar_kolicin) @bottle.get(\"/zakljuci/<id_rezervacije>\") def", "sporocilo=\"Rezervacija ne obstaja!\", naslov=\"Napaka\") dodaj_gosta_na_rezervacijo(rezervacija.id_rezervacije, { \"EMSO\":emso, \"ime\":ime, \"priimek\":priimek, \"drzava\":drzava,", "hide_header_back=True) @bottle.get(\"/parcela/<id_parcele>\") def parcela(id_parcele): 'Preverimo stanje parcele' rez, gostje =", "dobi_rezervacijo_po_id(id_rezervacije) if not rezervacija: return template(\"error\", sporocilo=\"Rezervacija ne obstaja!\", naslov=\"Napaka\")", "= len(gost.nocitve) slovar_cen[gost] = format(gost.cena_nocitve() * slovar_kolicin.get(gost), '.2f') return template(\"racun\",", "= dt.datetime.fromisoformat(do).date() except Exception as e: print(e) print(\"Napaka pri pretvorbi", "sestevek, postavke = dobi_postavke_racuna(rezervacija) slovar_cen = {} slovar_kolicin = {}", "naredi novo rezervacijo in ji doda prvega gosta\" # Preberemo", "parcele = dobi_parcele_za_prikaz(dt.date.today()) return template(\"domov\", parcele=parcele, hide_header_back=True) @bottle.get(\"/parcela/<id_parcele>\") def parcela(id_parcele):", "lastnosti iz forme ime = request.forms.ime priimek = request.forms.priimek emso", "import datetime as dt @bottle.get('/') def root(): redirect('/domov') @bottle.get('/domov') def", "request.forms.zacetek#get(\"\") do = request.forms.konec#get(\"\") try: datum_od = dt.datetime.fromisoformat(od).date() datum_do =", "= dt.date.today() tomorrow = today + dt.timedelta(days=1) rezervacija = dobi_rezervacijo_po_id(id_rezervacije)", "= rezervacija.gostje sestevek, postavke = dobi_postavke_racuna(rezervacija) slovar_cen = {} slovar_kolicin", "gostje=gostje, today=today.strftime(\"%d/%m/%Y\"), slovar_cen=slovar_cen, slovar_kolicin=slovar_kolicin) @bottle.error(404) def napaka404(a): return template(\"error\", sporocilo=\"Stran", "bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request,", "rezervacija.gostje sestevek, postavke = zakljuci_na_datum_in_placaj(rezervacija, dt.date.today()) slovar_cen = {} slovar_kolicin", "not rezervacija: return template(\"error\", sporocilo=\"Rezervacija ne obstaja!\", naslov=\"Napaka\") dodaj_gosta_na_rezervacijo(rezervacija.id_rezervacije, {", "= request.forms.emso#get(\"\") drzava = request.forms.drzava#get(\"\") id_parcele = request.forms.id_parcele#get(\"\") od =", "sestevek=format(sestevek, '.2f'), gostje=gostje, today=today.strftime(\"%d/%m/%Y\"), slovar_cen=slovar_cen, slovar_kolicin=slovar_kolicin) @bottle.error(404) def napaka404(a): return", "ne obstaja!\", naslov=\"Napaka\") dodaj_gosta_na_rezervacijo(rezervacija.id_rezervacije, { \"EMSO\":emso, \"ime\":ime, \"priimek\":priimek, \"drzava\":drzava, },datum_od,datum_do)", "= request.forms.ime#get(\"\") priimek = request.forms.priimek#get(\"\") emso = 
request.forms.emso#get(\"\") drzava =", "datum_od, datum_do) return redirect(f\"/parcela/{id_parcele}\") @bottle.get(\"/dodaj-gosta/<id_rezervacije>\") def get_dodaj_gosta_na_rezervacijo(id_rezervacije): today = dt.date.today()", "is not None: stanje = \"Parcela je trenutno zasedena\" else:", "e: print(e) print(\"Napaka pri pretvorbi datumov\") return redirect(\"/naredi-rezervacijo\") rezervacija =", "request.forms.priimek#get(\"\") emso = request.forms.emso#get(\"\") drzava = request.forms.drzava#get(\"\") id_parcele = request.forms.id_parcele#get(\"\")", "sporocilo=\"Rezervacija ne obstaja!\", naslov=\"Napaka\") return template(\"dodajanje_gosta\", id_rezervacije=id_rezervacije, today=today, tomorrow=tomorrow) @bottle.post(\"/dodaj-gosta-na-rezervacijo\")", "rezervacijo in ji doda prvega gosta\" # Preberemo lastnosti iz", "template(\"domov\", parcele=parcele, hide_header_back=True) @bottle.get(\"/parcela/<id_parcele>\") def parcela(id_parcele): 'Preverimo stanje parcele' rez,", "sporocilo=\"Rezervacija ne obstaja!\", naslov=\"Napaka\") today = dt.date.today() gostje = rezervacija.gostje", "as dt @bottle.get('/') def root(): redirect('/domov') @bottle.get('/domov') def index(): parcele", "return redirect(\"/dodaj-gosta\") rezervacija = dobi_rezervacijo_po_id(id_rezervacije) if not rezervacija: return template(\"error\",", "stanje = \"Parcela je trenutno zasedena\" else: stanje = \"Parcela", "rezervaciji doda gosta\" # Preberemo lastnosti iz forme ime =", "= request.forms.id_parcele#get(\"\") od = request.forms.zacetek#get(\"\") do = request.forms.konec#get(\"\") print(ime, priimek)", "@bottle.error(500) def napaka500(a): return template(\"error\", sporocilo=\"Napaka streznika!\", naslov=\"500\") bottle.run(reloader=True, debug=True)", "= request.forms.priimek emso = request.forms.emso#get(\"\") drzava = request.forms.drzava#get(\"\") id_rezervacije =", "= request.forms.drzava#get(\"\") id_rezervacije = request.forms.rez#get(\"\") od = request.forms.zacetek#get(\"\") do =", "obstaja!\", naslov=\"Napaka\") dodaj_gosta_na_rezervacijo(rezervacija.id_rezervacije, { \"EMSO\":emso, \"ime\":ime, \"priimek\":priimek, \"drzava\":drzava, },datum_od,datum_do) print(id_rezervacije)", "doda gosta\" # Preberemo lastnosti iz forme ime = request.forms.ime", "as e: print(e) print(\"Napaka pri pretvorbi datumov\") return redirect(\"/naredi-rezervacijo\") rezervacija", "= today + dt.timedelta(days=1) return template('nova_rezervacija', id_parcele=id_parcele, today=today, tomorrow=tomorrow) @bottle.post(\"/naredi-rezervacijo\")", "naslov=\"Napaka\") return template(\"dodajanje_gosta\", id_rezervacije=id_rezervacije, today=today, tomorrow=tomorrow) @bottle.post(\"/dodaj-gosta-na-rezervacijo\") def post_dodaj_gosta_na_rezervacijo(): \"", "import bottle import controller from controller import dobi_parcele_za_prikaz, dobi_info_parcele, dodaj_gosta_na_rezervacijo,", "slovar_kolicin[gost] = len(gost.nocitve) slovar_cen[gost] = format(gost.cena_nocitve() * slovar_kolicin.get(gost), '.2f') return", "emso = request.forms.emso#get(\"\") drzava = request.forms.drzava#get(\"\") id_parcele = request.forms.id_parcele#get(\"\") od", "= dt.datetime.fromisoformat(od).date() datum_do = dt.datetime.fromisoformat(do).date() except Exception as e: print(e)", "iz forme ime = request.forms.ime priimek = request.forms.priimek emso =", "# Preberemo lastnosti iz forme ime = request.forms.ime priimek =", "print(e) print(\"Napaka pri pretvorbi datumov\") return redirect(\"/dodaj-gosta\") rezervacija = 
dobi_rezervacijo_po_id(id_rezervacije)", "return template(\"racun\", id_rezervacije=id_rezervacije, sestevek=format(sestevek, '.2f'), gostje=gostje, today=today.strftime(\"%d/%m/%Y\"), slovar_cen=slovar_cen, slovar_kolicin=slovar_kolicin) @bottle.error(404)", "stanje=stanje, gostje=gostje) @bottle.get(\"/naredi-rezervacijo/<id_parcele>\") def nova_rezervacija(id_parcele=None): print(id_parcele) today = dt.date.today() tomorrow", "zakljuci_na_datum_in_placaj(rezervacija, dt.date.today()) slovar_cen = {} slovar_kolicin = {} for gost", "gostje = rezervacija.gostje sestevek, postavke = zakljuci_na_datum_in_placaj(rezervacija, dt.date.today()) slovar_cen =", "request.forms.drzava#get(\"\") id_rezervacije = request.forms.rez#get(\"\") od = request.forms.zacetek#get(\"\") do = request.forms.konec#get(\"\")", "'.2f'), gostje=gostje, today=today.strftime(\"%d/%m/%Y\"), slovar_cen=slovar_cen, slovar_kolicin=slovar_kolicin) @bottle.error(404) def napaka404(a): return template(\"error\",", "today = dt.date.today() tomorrow = today + dt.timedelta(days=1) return template('nova_rezervacija',", "naredi_rezervacijo, dobi_rezervacijo_po_id, zakljuci_na_datum_in_placaj, dobi_postavke_racuna import datetime as dt @bottle.get('/') def", "for gost in gostje: slovar_kolicin[gost] = len(gost.nocitve) slovar_cen[gost] = format(gost.cena_nocitve()", "parcele' rez, gostje = dobi_info_parcele(id_parcele, dt.date.today()) if rez is not", "if rez is not None: stanje = \"Parcela je trenutno", "priimek = request.forms.priimek emso = request.forms.emso#get(\"\") drzava = request.forms.drzava#get(\"\") id_rezervacije", "def root(): redirect('/domov') @bottle.get('/domov') def index(): parcele = dobi_parcele_za_prikaz(dt.date.today()) return", "modelu rezervaciji doda gosta\" # Preberemo lastnosti iz forme ime", "len(gost.nocitve) slovar_cen[gost] = format(gost.cena_nocitve() * slovar_kolicin.get(gost), '.2f') return template(\"racun\", id_rezervacije=id_rezervacije,", "dobi_rezervacijo_po_id, zakljuci_na_datum_in_placaj, dobi_postavke_racuna import datetime as dt @bottle.get('/') def root():", "dobi_info_parcele, dodaj_gosta_na_rezervacijo, naredi_rezervacijo, dobi_rezervacijo_po_id, zakljuci_na_datum_in_placaj, dobi_postavke_racuna import datetime as dt", "def nova_rezervacija(id_parcele=None): print(id_parcele) today = dt.date.today() tomorrow = today +", "= rezervacija.gostje sestevek, postavke = zakljuci_na_datum_in_placaj(rezervacija, dt.date.today()) slovar_cen = {}", "{} slovar_kolicin = {} for gost in gostje: slovar_kolicin[gost] =", "= request.forms.drzava#get(\"\") id_parcele = request.forms.id_parcele#get(\"\") od = request.forms.zacetek#get(\"\") do =", "if not rezervacija: return template(\"error\", sporocilo=\"Rezervacija ne obstaja!\", naslov=\"Napaka\") dodaj_gosta_na_rezervacijo(rezervacija.id_rezervacije,", "today = dt.date.today() tomorrow = today + dt.timedelta(days=1) rezervacija =", "= {} slovar_kolicin = {} for gost in gostje: slovar_kolicin[gost]", "redirect(f\"/parcela/{rezervacija.id_parcele}\") @bottle.get(\"/predracun/<id_rezervacije>\") def predracun(id_rezervacije): rezervacija = dobi_rezervacijo_po_id(id_rezervacije) if not rezervacija:", "rezervacija.gostje sestevek, postavke = dobi_postavke_racuna(rezervacija) slovar_cen = {} slovar_kolicin =", "template('nova_rezervacija', id_parcele=id_parcele, today=today, tomorrow=tomorrow) @bottle.post(\"/naredi-rezervacijo\") def naredi_novo_rezervacijo(): \" V modelu", "template(\"error\", sporocilo=\"Rezervacija ne obstaja!\", naslov=\"Napaka\") 
dodaj_gosta_na_rezervacijo(rezervacija.id_rezervacije, { \"EMSO\":emso, \"ime\":ime, \"priimek\":priimek,", "slovar_cen[gost] = format(gost.cena_nocitve() * slovar_kolicin.get(gost), '.2f') return template(\"racun\", id_rezervacije=id_rezervacije, sestevek=format(sestevek,", "datum_do = dt.datetime.fromisoformat(do).date() except Exception as e: print(e) print(\"Napaka pri", "ne obstaja!\", naslov=\"Napaka\") today = dt.date.today() gostje = rezervacija.gostje sestevek,", "'Preverimo stanje parcele' rez, gostje = dobi_info_parcele(id_parcele, dt.date.today()) if rez", "tomorrow=tomorrow) @bottle.post(\"/naredi-rezervacijo\") def naredi_novo_rezervacijo(): \" V modelu naredi novo rezervacijo", "datetime as dt @bottle.get('/') def root(): redirect('/domov') @bottle.get('/domov') def index():", "TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic,", "controller from controller import dobi_parcele_za_prikaz, dobi_info_parcele, dodaj_gosta_na_rezervacijo, naredi_rezervacijo, dobi_rezervacijo_po_id, zakljuci_na_datum_in_placaj,", "tomorrow=tomorrow) @bottle.post(\"/dodaj-gosta-na-rezervacijo\") def post_dodaj_gosta_na_rezervacijo(): \" V modelu rezervaciji doda gosta\"", "template, redirect, get, post, request, response, auth_basic, Bottle, abort, error,", "sestevek, postavke = zakljuci_na_datum_in_placaj(rezervacija, dt.date.today()) slovar_cen = {} slovar_kolicin =", "def post_dodaj_gosta_na_rezervacijo(): \" V modelu rezervaciji doda gosta\" # Preberemo", "= \"Parcela je trenutno na voljo\" return template('parcela', id_parcela=id_parcele, rezervacija=rez,", "@bottle.get(\"/predracun/<id_rezervacije>\") def predracun(id_rezervacije): rezervacija = dobi_rezervacijo_po_id(id_rezervacije) if not rezervacija: return", "id_rezervacije=id_rezervacije, sestevek=format(sestevek, '.2f'), gostje=gostje, today=today.strftime(\"%d/%m/%Y\"), slovar_cen=slovar_cen, slovar_kolicin=slovar_kolicin) @bottle.get(\"/zakljuci/<id_rezervacije>\") def racun(id_rezervacije):", "request.forms.emso#get(\"\") drzava = request.forms.drzava#get(\"\") id_parcele = request.forms.id_parcele#get(\"\") od = request.forms.zacetek#get(\"\")", "do = request.forms.konec#get(\"\") try: datum_od = dt.datetime.fromisoformat(od).date() datum_do = dt.datetime.fromisoformat(do).date()", "get, post, request, response, auth_basic, Bottle, abort, error, static_file import", "try: datum_od = dt.datetime.fromisoformat(od).date() datum_do = dt.datetime.fromisoformat(do).date() except Exception as", "today=today, tomorrow=tomorrow) @bottle.post(\"/naredi-rezervacijo\") def naredi_novo_rezervacijo(): \" V modelu naredi novo", "\"ime\":ime, \"priimek\":priimek, \"drzava\":drzava, },datum_od,datum_do) print(id_rezervacije) return redirect(f\"/parcela/{rezervacija.id_parcele}\") @bottle.get(\"/predracun/<id_rezervacije>\") def predracun(id_rezervacije):", "datumov\") return redirect(\"/naredi-rezervacijo\") rezervacija = naredi_rezervacijo(id_parcele) dodaj_gosta_na_rezervacijo(rezervacija.id_rezervacije, { \"EMSO\":emso, \"ime\":ime,", "today=today, tomorrow=tomorrow) @bottle.post(\"/dodaj-gosta-na-rezervacijo\") def post_dodaj_gosta_na_rezervacijo(): \" V modelu rezervaciji doda", "rezervacija = naredi_rezervacijo(id_parcele) dodaj_gosta_na_rezervacijo(rezervacija.id_rezervacije, { \"EMSO\":emso, \"ime\":ime, \"priimek\":priimek, \"drzava\":drzava, },", "= dobi_info_parcele(id_parcele, dt.date.today()) if rez is not None: stanje =", "Exception as e: print(e) print(\"Napaka pri pretvorbi datumov\") return 
redirect(\"/dodaj-gosta\")", "controller import dobi_parcele_za_prikaz, dobi_info_parcele, dodaj_gosta_na_rezervacijo, naredi_rezervacijo, dobi_rezervacijo_po_id, zakljuci_na_datum_in_placaj, dobi_postavke_racuna import", "rezervacija: return template(\"error\", sporocilo=\"Rezervacija ne obstaja!\", naslov=\"Napaka\") today = dt.date.today()", "naredi_novo_rezervacijo(): \" V modelu naredi novo rezervacijo in ji doda", "return redirect(f\"/parcela/{id_parcele}\") @bottle.get(\"/dodaj-gosta/<id_rezervacije>\") def get_dodaj_gosta_na_rezervacijo(id_rezervacije): today = dt.date.today() tomorrow =", "iz forme ime = request.forms.ime#get(\"\") priimek = request.forms.priimek#get(\"\") emso =", "= dt.date.today() gostje = rezervacija.gostje sestevek, postavke = dobi_postavke_racuna(rezervacija) slovar_cen", "redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file", "\"EMSO\":emso, \"ime\":ime, \"priimek\":priimek, \"drzava\":drzava, }, datum_od, datum_do) return redirect(f\"/parcela/{id_parcele}\") @bottle.get(\"/dodaj-gosta/<id_rezervacije>\")", "def index(): parcele = dobi_parcele_za_prikaz(dt.date.today()) return template(\"domov\", parcele=parcele, hide_header_back=True) @bottle.get(\"/parcela/<id_parcele>\")", "{ \"EMSO\":emso, \"ime\":ime, \"priimek\":priimek, \"drzava\":drzava, }, datum_od, datum_do) return redirect(f\"/parcela/{id_parcele}\")", "slovar_cen = {} slovar_kolicin = {} for gost in gostje:", "pretvorbi datumov\") return redirect(\"/naredi-rezervacijo\") rezervacija = naredi_rezervacijo(id_parcele) dodaj_gosta_na_rezervacijo(rezervacija.id_rezervacije, { \"EMSO\":emso,", "request.forms.zacetek#get(\"\") do = request.forms.konec#get(\"\") print(ime, priimek) try: datum_od = dt.datetime.fromisoformat(od).date()", "{ \"EMSO\":emso, \"ime\":ime, \"priimek\":priimek, \"drzava\":drzava, },datum_od,datum_do) print(id_rezervacije) return redirect(f\"/parcela/{rezervacija.id_parcele}\") @bottle.get(\"/predracun/<id_rezervacije>\")", "ne obstaja!\", naslov=\"Napaka\") return template(\"dodajanje_gosta\", id_rezervacije=id_rezervacije, today=today, tomorrow=tomorrow) @bottle.post(\"/dodaj-gosta-na-rezervacijo\") def", "today + dt.timedelta(days=1) rezervacija = dobi_rezervacijo_po_id(id_rezervacije) if not rezervacija: return", "template(\"error\", sporocilo=\"Stran ne obstaja!\", naslov=\"404\") @bottle.error(500) def napaka500(a): return template(\"error\",", "static_file import bottle import controller from controller import dobi_parcele_za_prikaz, dobi_info_parcele,", "dt.datetime.fromisoformat(od).date() datum_do = dt.datetime.fromisoformat(do).date() except Exception as e: print(e) print(\"Napaka", "postavke = zakljuci_na_datum_in_placaj(rezervacija, dt.date.today()) slovar_cen = {} slovar_kolicin = {}", "\" V modelu rezervaciji doda gosta\" # Preberemo lastnosti iz", "def predracun(id_rezervacije): rezervacija = dobi_rezervacijo_po_id(id_rezervacije) if not rezervacija: return template(\"error\",", "dodaj_gosta_na_rezervacijo(rezervacija.id_rezervacije, { \"EMSO\":emso, \"ime\":ime, \"priimek\":priimek, \"drzava\":drzava, },datum_od,datum_do) print(id_rezervacije) return redirect(f\"/parcela/{rezervacija.id_parcele}\")", "= dobi_parcele_za_prikaz(dt.date.today()) return template(\"domov\", parcele=parcele, hide_header_back=True) @bottle.get(\"/parcela/<id_parcele>\") def parcela(id_parcele): 'Preverimo", "datumov\") return redirect(\"/dodaj-gosta\") rezervacija = dobi_rezervacijo_po_id(id_rezervacije) if not rezervacija: return", "stanje parcele' 
rez, gostje = dobi_info_parcele(id_parcele, dt.date.today()) if rez is", "\"drzava\":drzava, }, datum_od, datum_do) return redirect(f\"/parcela/{id_parcele}\") @bottle.get(\"/dodaj-gosta/<id_rezervacije>\") def get_dodaj_gosta_na_rezervacijo(id_rezervacije): today", "gostje = dobi_info_parcele(id_parcele, dt.date.today()) if rez is not None: stanje", "+ dt.timedelta(days=1) return template('nova_rezervacija', id_parcele=id_parcele, today=today, tomorrow=tomorrow) @bottle.post(\"/naredi-rezervacijo\") def naredi_novo_rezervacijo():", "ime = request.forms.ime#get(\"\") priimek = request.forms.priimek#get(\"\") emso = request.forms.emso#get(\"\") drzava", "request.forms.emso#get(\"\") drzava = request.forms.drzava#get(\"\") id_rezervacije = request.forms.rez#get(\"\") od = request.forms.zacetek#get(\"\")", "slovar_kolicin = {} for gost in gostje: slovar_kolicin[gost] = len(gost.nocitve)", "in ji doda prvega gosta\" # Preberemo lastnosti iz forme", "# Preberemo lastnosti iz forme ime = request.forms.ime#get(\"\") priimek =", "dobi_postavke_racuna(rezervacija) slovar_cen = {} slovar_kolicin = {} for gost in", "as e: print(e) print(\"Napaka pri pretvorbi datumov\") return redirect(\"/dodaj-gosta\") rezervacija", "\" V modelu naredi novo rezervacijo in ji doda prvega", "post, request, response, auth_basic, Bottle, abort, error, static_file import bottle", "request.forms.id_parcele#get(\"\") od = request.forms.zacetek#get(\"\") do = request.forms.konec#get(\"\") print(ime, priimek) try:", "format(gost.cena_nocitve() * slovar_kolicin.get(gost), '.2f') return template(\"racun\", id_rezervacije=id_rezervacije, sestevek=format(sestevek, '.2f'), gostje=gostje,", "voljo\" return template('parcela', id_parcela=id_parcele, rezervacija=rez, stanje=stanje, gostje=gostje) @bottle.get(\"/naredi-rezervacijo/<id_parcele>\") def nova_rezervacija(id_parcele=None):", "request.forms.rez#get(\"\") od = request.forms.zacetek#get(\"\") do = request.forms.konec#get(\"\") try: datum_od =", "today=today.strftime(\"%d/%m/%Y\"), slovar_cen=slovar_cen, slovar_kolicin=slovar_kolicin) @bottle.error(404) def napaka404(a): return template(\"error\", sporocilo=\"Stran ne", "gostje=gostje, today=today.strftime(\"%d/%m/%Y\"), slovar_cen=slovar_cen, slovar_kolicin=slovar_kolicin) @bottle.get(\"/zakljuci/<id_rezervacije>\") def racun(id_rezervacije): rezervacija = dobi_rezervacijo_po_id(id_rezervacije)", "\"drzava\":drzava, },datum_od,datum_do) print(id_rezervacije) return redirect(f\"/parcela/{rezervacija.id_parcele}\") @bottle.get(\"/predracun/<id_rezervacije>\") def predracun(id_rezervacije): rezervacija =", "dt.datetime.fromisoformat(do).date() except Exception as e: print(e) print(\"Napaka pri pretvorbi datumov\")", "= request.forms.zacetek#get(\"\") do = request.forms.konec#get(\"\") print(ime, priimek) try: datum_od =", "dt.date.today() tomorrow = today + dt.timedelta(days=1) rezervacija = dobi_rezervacijo_po_id(id_rezervacije) if", "dobi_postavke_racuna import datetime as dt @bottle.get('/') def root(): redirect('/domov') @bottle.get('/domov')", "template(\"error\", sporocilo=\"Rezervacija ne obstaja!\", naslov=\"Napaka\") today = dt.date.today() gostje =", "gosta\" # Preberemo lastnosti iz forme ime = request.forms.ime#get(\"\") priimek", "sporocilo=\"Stran ne obstaja!\", naslov=\"404\") @bottle.error(500) def napaka500(a): return template(\"error\", sporocilo=\"Napaka", "print(\"Napaka pri pretvorbi datumov\") return redirect(\"/naredi-rezervacijo\") rezervacija = naredi_rezervacijo(id_parcele) 
dodaj_gosta_na_rezervacijo(rezervacija.id_rezervacije,", "datum_od = dt.datetime.fromisoformat(od).date() datum_do = dt.datetime.fromisoformat(do).date() except Exception as e:", "gostje=gostje) @bottle.get(\"/naredi-rezervacijo/<id_parcele>\") def nova_rezervacija(id_parcele=None): print(id_parcele) today = dt.date.today() tomorrow =", "V modelu naredi novo rezervacijo in ji doda prvega gosta\"", "response, auth_basic, Bottle, abort, error, static_file import bottle import controller", "= request.forms.zacetek#get(\"\") do = request.forms.konec#get(\"\") try: datum_od = dt.datetime.fromisoformat(od).date() datum_do", "template(\"error\", sporocilo=\"Rezervacija ne obstaja!\", naslov=\"Napaka\") return template(\"dodajanje_gosta\", id_rezervacije=id_rezervacije, today=today, tomorrow=tomorrow)", "return template(\"error\", sporocilo=\"Rezervacija ne obstaja!\", naslov=\"Napaka\") return template(\"dodajanje_gosta\", id_rezervacije=id_rezervacije, today=today,", "dobi_info_parcele(id_parcele, dt.date.today()) if rez is not None: stanje = \"Parcela", "id_parcele=id_parcele, today=today, tomorrow=tomorrow) @bottle.post(\"/naredi-rezervacijo\") def naredi_novo_rezervacijo(): \" V modelu naredi", "return template(\"dodajanje_gosta\", id_rezervacije=id_rezervacije, today=today, tomorrow=tomorrow) @bottle.post(\"/dodaj-gosta-na-rezervacijo\") def post_dodaj_gosta_na_rezervacijo(): \" V", "request, response, auth_basic, Bottle, abort, error, static_file import bottle import", "modelu naredi novo rezervacijo in ji doda prvega gosta\" #", "tomorrow = today + dt.timedelta(days=1) rezervacija = dobi_rezervacijo_po_id(id_rezervacije) if not", "rezervacija = dobi_rezervacijo_po_id(id_rezervacije) if not rezervacija: return template(\"error\", sporocilo=\"Rezervacija ne", "not rezervacija: return template(\"error\", sporocilo=\"Rezervacija ne obstaja!\", naslov=\"Napaka\") today =", "get_dodaj_gosta_na_rezervacijo(id_rezervacije): today = dt.date.today() tomorrow = today + dt.timedelta(days=1) rezervacija", "rezervacija=rez, stanje=stanje, gostje=gostje) @bottle.get(\"/naredi-rezervacijo/<id_parcele>\") def nova_rezervacija(id_parcele=None): print(id_parcele) today = dt.date.today()", "obstaja!\", naslov=\"404\") @bottle.error(500) def napaka500(a): return template(\"error\", sporocilo=\"Napaka streznika!\", naslov=\"500\")", "= zakljuci_na_datum_in_placaj(rezervacija, dt.date.today()) slovar_cen = {} slovar_kolicin = {} for", "V modelu rezervaciji doda gosta\" # Preberemo lastnosti iz forme", "@bottle.error(404) def napaka404(a): return template(\"error\", sporocilo=\"Stran ne obstaja!\", naslov=\"404\") @bottle.error(500)", "route, run, template, redirect, get, post, request, response, auth_basic, Bottle,", "def racun(id_rezervacije): rezervacija = dobi_rezervacijo_po_id(id_rezervacije) if not rezervacija: return template(\"error\",", "return template(\"domov\", parcele=parcele, hide_header_back=True) @bottle.get(\"/parcela/<id_parcele>\") def parcela(id_parcele): 'Preverimo stanje parcele'", "today=today.strftime(\"%d/%m/%Y\"), slovar_cen=slovar_cen, slovar_kolicin=slovar_kolicin) @bottle.get(\"/zakljuci/<id_rezervacije>\") def racun(id_rezervacije): rezervacija = dobi_rezervacijo_po_id(id_rezervacije) if", "if not rezervacija: return template(\"error\", sporocilo=\"Rezervacija ne obstaja!\", naslov=\"Napaka\") today", "dodaj_gosta_na_rezervacijo(rezervacija.id_rezervacije, { \"EMSO\":emso, \"ime\":ime, \"priimek\":priimek, \"drzava\":drzava, }, datum_od, datum_do) return", "dt.date.today()) if rez is 
not None: stanje = \"Parcela je", "redirect(\"/naredi-rezervacijo\") rezervacija = naredi_rezervacijo(id_parcele) dodaj_gosta_na_rezervacijo(rezervacija.id_rezervacije, { \"EMSO\":emso, \"ime\":ime, \"priimek\":priimek, \"drzava\":drzava,", "@bottle.post(\"/naredi-rezervacijo\") def naredi_novo_rezervacijo(): \" V modelu naredi novo rezervacijo in", "in gostje: slovar_kolicin[gost] = len(gost.nocitve) slovar_cen[gost] = format(gost.cena_nocitve() * slovar_kolicin.get(gost),", "dobi_parcele_za_prikaz(dt.date.today()) return template(\"domov\", parcele=parcele, hide_header_back=True) @bottle.get(\"/parcela/<id_parcele>\") def parcela(id_parcele): 'Preverimo stanje", "je trenutno na voljo\" return template('parcela', id_parcela=id_parcele, rezervacija=rez, stanje=stanje, gostje=gostje)", "@bottle.post(\"/dodaj-gosta-na-rezervacijo\") def post_dodaj_gosta_na_rezervacijo(): \" V modelu rezervaciji doda gosta\" #", "from controller import dobi_parcele_za_prikaz, dobi_info_parcele, dodaj_gosta_na_rezervacijo, naredi_rezervacijo, dobi_rezervacijo_po_id, zakljuci_na_datum_in_placaj, dobi_postavke_racuna", "zakljuci_na_datum_in_placaj, dobi_postavke_racuna import datetime as dt @bottle.get('/') def root(): redirect('/domov')", "rez is not None: stanje = \"Parcela je trenutno zasedena\"", "def napaka404(a): return template(\"error\", sporocilo=\"Stran ne obstaja!\", naslov=\"404\") @bottle.error(500) def", "emso = request.forms.emso#get(\"\") drzava = request.forms.drzava#get(\"\") id_rezervacije = request.forms.rez#get(\"\") od", "* slovar_kolicin.get(gost), '.2f') return template(\"racun\", id_rezervacije=id_rezervacije, sestevek=format(sestevek, '.2f'), gostje=gostje, today=today.strftime(\"%d/%m/%Y\"),", "'.2f'), gostje=gostje, today=today.strftime(\"%d/%m/%Y\"), slovar_cen=slovar_cen, slovar_kolicin=slovar_kolicin) @bottle.get(\"/zakljuci/<id_rezervacije>\") def racun(id_rezervacije): rezervacija =", "def get_dodaj_gosta_na_rezervacijo(id_rezervacije): today = dt.date.today() tomorrow = today + dt.timedelta(days=1)", "print(ime, priimek) try: datum_od = dt.datetime.fromisoformat(od).date() datum_do = dt.datetime.fromisoformat(do).date() except", "redirect('/domov') @bottle.get('/domov') def index(): parcele = dobi_parcele_za_prikaz(dt.date.today()) return template(\"domov\", parcele=parcele,", "napaka404(a): return template(\"error\", sporocilo=\"Stran ne obstaja!\", naslov=\"404\") @bottle.error(500) def napaka500(a):", "= dobi_rezervacijo_po_id(id_rezervacije) if not rezervacija: return template(\"error\", sporocilo=\"Rezervacija ne obstaja!\",", "novo rezervacijo in ji doda prvega gosta\" # Preberemo lastnosti", "stanje = \"Parcela je trenutno na voljo\" return template('parcela', id_parcela=id_parcele,", "dt @bottle.get('/') def root(): redirect('/domov') @bottle.get('/domov') def index(): parcele =", "id_parcele = request.forms.id_parcele#get(\"\") od = request.forms.zacetek#get(\"\") do = request.forms.konec#get(\"\") print(ime,", "not rezervacija: return template(\"error\", sporocilo=\"Rezervacija ne obstaja!\", naslov=\"Napaka\") return template(\"dodajanje_gosta\",", "zasedena\" else: stanje = \"Parcela je trenutno na voljo\" return", "+ dt.timedelta(days=1) rezervacija = dobi_rezervacijo_po_id(id_rezervacije) if not rezervacija: return template(\"error\",", "parcele=parcele, hide_header_back=True) @bottle.get(\"/parcela/<id_parcele>\") def parcela(id_parcele): 'Preverimo stanje parcele' rez, gostje", "request.forms.ime priimek = request.forms.priimek emso = request.forms.emso#get(\"\") 
drzava = request.forms.drzava#get(\"\")", "datum_do) return redirect(f\"/parcela/{id_parcele}\") @bottle.get(\"/dodaj-gosta/<id_rezervacije>\") def get_dodaj_gosta_na_rezervacijo(id_rezervacije): today = dt.date.today() tomorrow", "\"priimek\":priimek, \"drzava\":drzava, },datum_od,datum_do) print(id_rezervacije) return redirect(f\"/parcela/{rezervacija.id_parcele}\") @bottle.get(\"/predracun/<id_rezervacije>\") def predracun(id_rezervacije): rezervacija", "= request.forms.konec#get(\"\") print(ime, priimek) try: datum_od = dt.datetime.fromisoformat(od).date() datum_do =", "return template('parcela', id_parcela=id_parcele, rezervacija=rez, stanje=stanje, gostje=gostje) @bottle.get(\"/naredi-rezervacijo/<id_parcele>\") def nova_rezervacija(id_parcele=None): print(id_parcele)", "= request.forms.rez#get(\"\") od = request.forms.zacetek#get(\"\") do = request.forms.konec#get(\"\") try: datum_od", "id_rezervacije = request.forms.rez#get(\"\") od = request.forms.zacetek#get(\"\") do = request.forms.konec#get(\"\") try:", "print(id_parcele) today = dt.date.today() tomorrow = today + dt.timedelta(days=1) return", "parcela(id_parcele): 'Preverimo stanje parcele' rez, gostje = dobi_info_parcele(id_parcele, dt.date.today()) if", "today = dt.date.today() gostje = rezervacija.gostje sestevek, postavke = dobi_postavke_racuna(rezervacija)", "request.forms.ime#get(\"\") priimek = request.forms.priimek#get(\"\") emso = request.forms.emso#get(\"\") drzava = request.forms.drzava#get(\"\")", "pri pretvorbi datumov\") return redirect(\"/dodaj-gosta\") rezervacija = dobi_rezervacijo_po_id(id_rezervacije) if not", "priimek = request.forms.priimek#get(\"\") emso = request.forms.emso#get(\"\") drzava = request.forms.drzava#get(\"\") id_parcele", "id_parcela=id_parcele, rezervacija=rez, stanje=stanje, gostje=gostje) @bottle.get(\"/naredi-rezervacijo/<id_parcele>\") def nova_rezervacija(id_parcele=None): print(id_parcele) today =", "print(e) print(\"Napaka pri pretvorbi datumov\") return redirect(\"/naredi-rezervacijo\") rezervacija = naredi_rezervacijo(id_parcele)", "doda prvega gosta\" # Preberemo lastnosti iz forme ime =", "trenutno na voljo\" return template('parcela', id_parcela=id_parcele, rezervacija=rez, stanje=stanje, gostje=gostje) @bottle.get(\"/naredi-rezervacijo/<id_parcele>\")", "error, static_file import bottle import controller from controller import dobi_parcele_za_prikaz,", "naredi_rezervacijo(id_parcele) dodaj_gosta_na_rezervacijo(rezervacija.id_rezervacije, { \"EMSO\":emso, \"ime\":ime, \"priimek\":priimek, \"drzava\":drzava, }, datum_od, datum_do)", "abort, error, static_file import bottle import controller from controller import", "= format(gost.cena_nocitve() * slovar_kolicin.get(gost), '.2f') return template(\"racun\", id_rezervacije=id_rezervacije, sestevek=format(sestevek, '.2f'),", "racun(id_rezervacije): rezervacija = dobi_rezervacijo_po_id(id_rezervacije) if not rezervacija: return template(\"error\", sporocilo=\"Rezervacija", "template('parcela', id_parcela=id_parcele, rezervacija=rez, stanje=stanje, gostje=gostje) @bottle.get(\"/naredi-rezervacijo/<id_parcele>\") def nova_rezervacija(id_parcele=None): print(id_parcele) today", "Preberemo lastnosti iz forme ime = request.forms.ime#get(\"\") priimek = request.forms.priimek#get(\"\")", "\"Parcela je trenutno na voljo\" return template('parcela', id_parcela=id_parcele, rezervacija=rez, stanje=stanje,", "dt.timedelta(days=1) rezervacija = dobi_rezervacijo_po_id(id_rezervacije) if not rezervacija: return template(\"error\", 
sporocilo=\"Rezervacija", "{} for gost in gostje: slovar_kolicin[gost] = len(gost.nocitve) slovar_cen[gost] =", "od = request.forms.zacetek#get(\"\") do = request.forms.konec#get(\"\") print(ime, priimek) try: datum_od", "= request.forms.ime priimek = request.forms.priimek emso = request.forms.emso#get(\"\") drzava =", "id_rezervacije=id_rezervacije, sestevek=format(sestevek, '.2f'), gostje=gostje, today=today.strftime(\"%d/%m/%Y\"), slovar_cen=slovar_cen, slovar_kolicin=slovar_kolicin) @bottle.error(404) def napaka404(a):", "rezervacija: return template(\"error\", sporocilo=\"Rezervacija ne obstaja!\", naslov=\"Napaka\") return template(\"dodajanje_gosta\", id_rezervacije=id_rezervacije,", "else: stanje = \"Parcela je trenutno na voljo\" return template('parcela',", "tomorrow = today + dt.timedelta(days=1) return template('nova_rezervacija', id_parcele=id_parcele, today=today, tomorrow=tomorrow)", "e: print(e) print(\"Napaka pri pretvorbi datumov\") return redirect(\"/dodaj-gosta\") rezervacija =", "bottle import controller from controller import dobi_parcele_za_prikaz, dobi_info_parcele, dodaj_gosta_na_rezervacijo, naredi_rezervacijo,", "\"priimek\":priimek, \"drzava\":drzava, }, datum_od, datum_do) return redirect(f\"/parcela/{id_parcele}\") @bottle.get(\"/dodaj-gosta/<id_rezervacije>\") def get_dodaj_gosta_na_rezervacijo(id_rezervacije):", "= dt.date.today() tomorrow = today + dt.timedelta(days=1) return template('nova_rezervacija', id_parcele=id_parcele,", "post_dodaj_gosta_na_rezervacijo(): \" V modelu rezervaciji doda gosta\" # Preberemo lastnosti", "print(id_rezervacije) return redirect(f\"/parcela/{rezervacija.id_parcele}\") @bottle.get(\"/predracun/<id_rezervacije>\") def predracun(id_rezervacije): rezervacija = dobi_rezervacijo_po_id(id_rezervacije) if", "rezervacija: return template(\"error\", sporocilo=\"Rezervacija ne obstaja!\", naslov=\"Napaka\") dodaj_gosta_na_rezervacijo(rezervacija.id_rezervacije, { \"EMSO\":emso,", "not None: stanje = \"Parcela je trenutno zasedena\" else: stanje", "dodaj_gosta_na_rezervacijo, naredi_rezervacijo, dobi_rezervacijo_po_id, zakljuci_na_datum_in_placaj, dobi_postavke_racuna import datetime as dt @bottle.get('/')", "forme ime = request.forms.ime#get(\"\") priimek = request.forms.priimek#get(\"\") emso = request.forms.emso#get(\"\")", "trenutno zasedena\" else: stanje = \"Parcela je trenutno na voljo\"", "gostje = rezervacija.gostje sestevek, postavke = dobi_postavke_racuna(rezervacija) slovar_cen = {}", "= dobi_postavke_racuna(rezervacija) slovar_cen = {} slovar_kolicin = {} for gost", "= request.forms.emso#get(\"\") drzava = request.forms.drzava#get(\"\") id_rezervacije = request.forms.rez#get(\"\") od =", "naslov=\"Napaka\") dodaj_gosta_na_rezervacijo(rezervacija.id_rezervacije, { \"EMSO\":emso, \"ime\":ime, \"priimek\":priimek, \"drzava\":drzava, },datum_od,datum_do) print(id_rezervacije) return", "postavke = dobi_postavke_racuna(rezervacija) slovar_cen = {} slovar_kolicin = {} for", "template(\"racun\", id_rezervacije=id_rezervacije, sestevek=format(sestevek, '.2f'), gostje=gostje, today=today.strftime(\"%d/%m/%Y\"), slovar_cen=slovar_cen, slovar_kolicin=slovar_kolicin) @bottle.error(404) def", "dt.date.today() tomorrow = today + dt.timedelta(days=1) return template('nova_rezervacija', id_parcele=id_parcele, today=today,", "@bottle.get(\"/naredi-rezervacijo/<id_parcele>\") def nova_rezervacija(id_parcele=None): print(id_parcele) today = dt.date.today() tomorrow = today", "Preberemo lastnosti iz forme ime = request.forms.ime priimek = 
request.forms.priimek", "pretvorbi datumov\") return redirect(\"/dodaj-gosta\") rezervacija = dobi_rezervacijo_po_id(id_rezervacije) if not rezervacija:", "gosta\" # Preberemo lastnosti iz forme ime = request.forms.ime priimek", "ime = request.forms.ime priimek = request.forms.priimek emso = request.forms.emso#get(\"\") drzava", "lastnosti iz forme ime = request.forms.ime#get(\"\") priimek = request.forms.priimek#get(\"\") emso", "request.forms.konec#get(\"\") try: datum_od = dt.datetime.fromisoformat(od).date() datum_do = dt.datetime.fromisoformat(do).date() except Exception", "= \"Parcela je trenutno zasedena\" else: stanje = \"Parcela je", "request.forms.konec#get(\"\") print(ime, priimek) try: datum_od = dt.datetime.fromisoformat(od).date() datum_do = dt.datetime.fromisoformat(do).date()", "Bottle, abort, error, static_file import bottle import controller from controller", "dt.timedelta(days=1) return template('nova_rezervacija', id_parcele=id_parcele, today=today, tomorrow=tomorrow) @bottle.post(\"/naredi-rezervacijo\") def naredi_novo_rezervacijo(): \"", "@bottle.get(\"/parcela/<id_parcele>\") def parcela(id_parcele): 'Preverimo stanje parcele' rez, gostje = dobi_info_parcele(id_parcele,", "= dt.date.today() gostje = rezervacija.gostje sestevek, postavke = zakljuci_na_datum_in_placaj(rezervacija, dt.date.today())", "gost in gostje: slovar_kolicin[gost] = len(gost.nocitve) slovar_cen[gost] = format(gost.cena_nocitve() *", "obstaja!\", naslov=\"Napaka\") return template(\"dodajanje_gosta\", id_rezervacije=id_rezervacije, today=today, tomorrow=tomorrow) @bottle.post(\"/dodaj-gosta-na-rezervacijo\") def post_dodaj_gosta_na_rezervacijo():", "dt.date.today()) slovar_cen = {} slovar_kolicin = {} for gost in", "forme ime = request.forms.ime priimek = request.forms.priimek emso = request.forms.emso#get(\"\")", "from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post,", "},datum_od,datum_do) print(id_rezervacije) return redirect(f\"/parcela/{rezervacija.id_parcele}\") @bottle.get(\"/predracun/<id_rezervacije>\") def predracun(id_rezervacije): rezervacija = dobi_rezervacijo_po_id(id_rezervacije)", "import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response,", "nova_rezervacija(id_parcele=None): print(id_parcele) today = dt.date.today() tomorrow = today + dt.timedelta(days=1)", "ne obstaja!\", naslov=\"404\") @bottle.error(500) def napaka500(a): return template(\"error\", sporocilo=\"Napaka streznika!\",", "do = request.forms.konec#get(\"\") print(ime, priimek) try: datum_od = dt.datetime.fromisoformat(od).date() datum_do", "pri pretvorbi datumov\") return redirect(\"/naredi-rezervacijo\") rezervacija = naredi_rezervacijo(id_parcele) dodaj_gosta_na_rezervacijo(rezervacija.id_rezervacije, {", "drzava = request.forms.drzava#get(\"\") id_rezervacije = request.forms.rez#get(\"\") od = request.forms.zacetek#get(\"\") do", "od = request.forms.zacetek#get(\"\") do = request.forms.konec#get(\"\") try: datum_od = dt.datetime.fromisoformat(od).date()", "print(\"Napaka pri pretvorbi datumov\") return redirect(\"/dodaj-gosta\") rezervacija = dobi_rezervacijo_po_id(id_rezervacije) if", "dt.date.today() gostje = rezervacija.gostje sestevek, postavke = zakljuci_na_datum_in_placaj(rezervacija, dt.date.today()) slovar_cen", "@bottle.get('/domov') def index(): parcele = dobi_parcele_za_prikaz(dt.date.today()) return template(\"domov\", parcele=parcele, hide_header_back=True)", "naslov=\"Napaka\") today = dt.date.today() gostje = rezervacija.gostje sestevek, postavke =", 
"prvega gosta\" # Preberemo lastnosti iz forme ime = request.forms.ime#get(\"\")", "except Exception as e: print(e) print(\"Napaka pri pretvorbi datumov\") return", "\"EMSO\":emso, \"ime\":ime, \"priimek\":priimek, \"drzava\":drzava, },datum_od,datum_do) print(id_rezervacije) return redirect(f\"/parcela/{rezervacija.id_parcele}\") @bottle.get(\"/predracun/<id_rezervacije>\") def", "@bottle.get(\"/zakljuci/<id_rezervacije>\") def racun(id_rezervacije): rezervacija = dobi_rezervacijo_po_id(id_rezervacije) if not rezervacija: return", "sestevek=format(sestevek, '.2f'), gostje=gostje, today=today.strftime(\"%d/%m/%Y\"), slovar_cen=slovar_cen, slovar_kolicin=slovar_kolicin) @bottle.get(\"/zakljuci/<id_rezervacije>\") def racun(id_rezervacije): rezervacija", "index(): parcele = dobi_parcele_za_prikaz(dt.date.today()) return template(\"domov\", parcele=parcele, hide_header_back=True) @bottle.get(\"/parcela/<id_parcele>\") def", "naslov=\"404\") @bottle.error(500) def napaka500(a): return template(\"error\", sporocilo=\"Napaka streznika!\", naslov=\"500\") bottle.run(reloader=True,", "dt.date.today() gostje = rezervacija.gostje sestevek, postavke = dobi_postavke_racuna(rezervacija) slovar_cen =", "slovar_kolicin=slovar_kolicin) @bottle.get(\"/zakljuci/<id_rezervacije>\") def racun(id_rezervacije): rezervacija = dobi_rezervacijo_po_id(id_rezervacije) if not rezervacija:", "\"Parcela je trenutno zasedena\" else: stanje = \"Parcela je trenutno", "id_rezervacije=id_rezervacije, today=today, tomorrow=tomorrow) @bottle.post(\"/dodaj-gosta-na-rezervacijo\") def post_dodaj_gosta_na_rezervacijo(): \" V modelu rezervaciji", "gostje: slovar_kolicin[gost] = len(gost.nocitve) slovar_cen[gost] = format(gost.cena_nocitve() * slovar_kolicin.get(gost), '.2f')", "rez, gostje = dobi_info_parcele(id_parcele, dt.date.today()) if rez is not None:", "redirect(f\"/parcela/{id_parcele}\") @bottle.get(\"/dodaj-gosta/<id_rezervacije>\") def get_dodaj_gosta_na_rezervacijo(id_rezervacije): today = dt.date.today() tomorrow = today", "@bottle.get(\"/dodaj-gosta/<id_rezervacije>\") def get_dodaj_gosta_na_rezervacijo(id_rezervacije): today = dt.date.today() tomorrow = today +", "drzava = request.forms.drzava#get(\"\") id_parcele = request.forms.id_parcele#get(\"\") od = request.forms.zacetek#get(\"\") do", "= request.forms.konec#get(\"\") try: datum_od = dt.datetime.fromisoformat(od).date() datum_do = dt.datetime.fromisoformat(do).date() except", "import controller from controller import dobi_parcele_za_prikaz, dobi_info_parcele, dodaj_gosta_na_rezervacijo, naredi_rezervacijo, dobi_rezervacijo_po_id,", "if not rezervacija: return template(\"error\", sporocilo=\"Rezervacija ne obstaja!\", naslov=\"Napaka\") return", "request.forms.drzava#get(\"\") id_parcele = request.forms.id_parcele#get(\"\") od = request.forms.zacetek#get(\"\") do = request.forms.konec#get(\"\")", "'.2f') return template(\"racun\", id_rezervacije=id_rezervacije, sestevek=format(sestevek, '.2f'), gostje=gostje, today=today.strftime(\"%d/%m/%Y\"), slovar_cen=slovar_cen, slovar_kolicin=slovar_kolicin)", "}, datum_od, datum_do) return redirect(f\"/parcela/{id_parcele}\") @bottle.get(\"/dodaj-gosta/<id_rezervacije>\") def get_dodaj_gosta_na_rezervacijo(id_rezervacije): today =", "\"ime\":ime, \"priimek\":priimek, \"drzava\":drzava, }, datum_od, datum_do) return redirect(f\"/parcela/{id_parcele}\") @bottle.get(\"/dodaj-gosta/<id_rezervacije>\") def", "return template('nova_rezervacija', id_parcele=id_parcele, today=today, tomorrow=tomorrow) 
@bottle.post(\"/naredi-rezervacijo\") def naredi_novo_rezervacijo(): \" V", "obstaja!\", naslov=\"Napaka\") today = dt.date.today() gostje = rezervacija.gostje sestevek, postavke", "def parcela(id_parcele): 'Preverimo stanje parcele' rez, gostje = dobi_info_parcele(id_parcele, dt.date.today())", "slovar_cen=slovar_cen, slovar_kolicin=slovar_kolicin) @bottle.get(\"/zakljuci/<id_rezervacije>\") def racun(id_rezervacije): rezervacija = dobi_rezervacijo_po_id(id_rezervacije) if not", "ji doda prvega gosta\" # Preberemo lastnosti iz forme ime", "priimek) try: datum_od = dt.datetime.fromisoformat(od).date() datum_do = dt.datetime.fromisoformat(do).date() except Exception", "import dobi_parcele_za_prikaz, dobi_info_parcele, dodaj_gosta_na_rezervacijo, naredi_rezervacijo, dobi_rezervacijo_po_id, zakljuci_na_datum_in_placaj, dobi_postavke_racuna import datetime", "je trenutno zasedena\" else: stanje = \"Parcela je trenutno na", "auth_basic, Bottle, abort, error, static_file import bottle import controller from", "def naredi_novo_rezervacijo(): \" V modelu naredi novo rezervacijo in ji", "return template(\"racun\", id_rezervacije=id_rezervacije, sestevek=format(sestevek, '.2f'), gostje=gostje, today=today.strftime(\"%d/%m/%Y\"), slovar_cen=slovar_cen, slovar_kolicin=slovar_kolicin) @bottle.get(\"/zakljuci/<id_rezervacije>\")", "predracun(id_rezervacije): rezervacija = dobi_rezervacijo_po_id(id_rezervacije) if not rezervacija: return template(\"error\", sporocilo=\"Rezervacija", "None: stanje = \"Parcela je trenutno zasedena\" else: stanje =", "redirect(\"/dodaj-gosta\") rezervacija = dobi_rezervacijo_po_id(id_rezervacije) if not rezervacija: return template(\"error\", sporocilo=\"Rezervacija", "template(\"dodajanje_gosta\", id_rezervacije=id_rezervacije, today=today, tomorrow=tomorrow) @bottle.post(\"/dodaj-gosta-na-rezervacijo\") def post_dodaj_gosta_na_rezervacijo(): \" V modelu", "= naredi_rezervacijo(id_parcele) dodaj_gosta_na_rezervacijo(rezervacija.id_rezervacije, { \"EMSO\":emso, \"ime\":ime, \"priimek\":priimek, \"drzava\":drzava, }, datum_od,", "na voljo\" return template('parcela', id_parcela=id_parcele, rezervacija=rez, stanje=stanje, gostje=gostje) @bottle.get(\"/naredi-rezervacijo/<id_parcele>\") def", "= request.forms.priimek#get(\"\") emso = request.forms.emso#get(\"\") drzava = request.forms.drzava#get(\"\") id_parcele =", "today + dt.timedelta(days=1) return template('nova_rezervacija', id_parcele=id_parcele, today=today, tomorrow=tomorrow) @bottle.post(\"/naredi-rezervacijo\") def", "slovar_kolicin.get(gost), '.2f') return template(\"racun\", id_rezervacije=id_rezervacije, sestevek=format(sestevek, '.2f'), gostje=gostje, today=today.strftime(\"%d/%m/%Y\"), slovar_cen=slovar_cen,", "root(): redirect('/domov') @bottle.get('/domov') def index(): parcele = dobi_parcele_za_prikaz(dt.date.today()) return template(\"domov\",", "return redirect(f\"/parcela/{rezervacija.id_parcele}\") @bottle.get(\"/predracun/<id_rezervacije>\") def predracun(id_rezervacije): rezervacija = dobi_rezervacijo_po_id(id_rezervacije) if not", "slovar_cen=slovar_cen, slovar_kolicin=slovar_kolicin) @bottle.error(404) def napaka404(a): return template(\"error\", sporocilo=\"Stran ne obstaja!\",", "= {} for gost in gostje: slovar_kolicin[gost] = len(gost.nocitve) slovar_cen[gost]", "return redirect(\"/naredi-rezervacijo\") rezervacija = naredi_rezervacijo(id_parcele) dodaj_gosta_na_rezervacijo(rezervacija.id_rezervacije, { \"EMSO\":emso, \"ime\":ime, \"priimek\":priimek,", "today = dt.date.today() gostje = 
rezervacija.gostje sestevek, postavke = zakljuci_na_datum_in_placaj(rezervacija,", "run, template, redirect, get, post, request, response, auth_basic, Bottle, abort,", "request.forms.priimek emso = request.forms.emso#get(\"\") drzava = request.forms.drzava#get(\"\") id_rezervacije = request.forms.rez#get(\"\")", "@bottle.get('/') def root(): redirect('/domov') @bottle.get('/domov') def index(): parcele = dobi_parcele_za_prikaz(dt.date.today())", "return template(\"error\", sporocilo=\"Rezervacija ne obstaja!\", naslov=\"Napaka\") dodaj_gosta_na_rezervacijo(rezervacija.id_rezervacije, { \"EMSO\":emso, \"ime\":ime,", "Exception as e: print(e) print(\"Napaka pri pretvorbi datumov\") return redirect(\"/naredi-rezervacijo\")", "= today + dt.timedelta(days=1) rezervacija = dobi_rezervacijo_po_id(id_rezervacije) if not rezervacija:" ]
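# The two POST handlers above duplicate the same ISO-date parsing block. A
# minimal sketch of a shared helper they could call instead (the name
# `preberi_datuma` is hypothetical, not part of the original app):
def preberi_datuma(od, do):
    "Parse two ISO date strings; return (date_from, date_to), or None on failure."
    try:
        return dt.datetime.fromisoformat(od).date(), dt.datetime.fromisoformat(do).date()
    except ValueError as e:
        print(e)
        return None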
[ "initialize def initializer(model, args): \"\"\"Initialize transducer model. Args: model (torch.nn.Module):", "coding: utf-8 -*- \"\"\"Parameter initialization for transducer RNN/Transformer parts.\"\"\" import", "model.dec.embed.weight.data.normal_(0, 1) for l in six.moves.range(len(model.dec.decoder)): set_forget_bias_to_one(model.dec.decoder[l].bias_ih) else: if args.etype", "instance args (Namespace): argument Namespace containing options \"\"\" if args.dtype", "args (Namespace): argument Namespace containing options \"\"\" if args.dtype !=", "\"\"\" if args.dtype != \"transformer\": if args.etype == \"transformer\": initialize(model.encoder,", "== \"transformer\": initialize(model.encoder, args.transformer_init) lecun_normal_init_parameters(model.dec) else: lecun_normal_init_parameters(model) model.dec.embed.weight.data.normal_(0, 1) for", "transducer model. Args: model (torch.nn.Module): transducer instance args (Namespace): argument", "espnet.nets.pytorch_backend.initialization import set_forget_bias_to_one from espnet.nets.pytorch_backend.transformer.initializer import initialize def initializer(model, args):", "l in six.moves.range(len(model.dec.decoder)): set_forget_bias_to_one(model.dec.decoder[l].bias_ih) else: if args.etype == \"transformer\": initialize(model,", "options \"\"\" if args.dtype != \"transformer\": if args.etype == \"transformer\":", "from espnet.nets.pytorch_backend.initialization import set_forget_bias_to_one from espnet.nets.pytorch_backend.transformer.initializer import initialize def initializer(model,", "in six.moves.range(len(model.dec.decoder)): set_forget_bias_to_one(model.dec.decoder[l].bias_ih) else: if args.etype == \"transformer\": initialize(model, args.transformer_init)", "if args.dtype != \"transformer\": if args.etype == \"transformer\": initialize(model.encoder, args.transformer_init)", "(Namespace): argument Namespace containing options \"\"\" if args.dtype != \"transformer\":", "RNN/Transformer parts.\"\"\" import six from espnet.nets.pytorch_backend.initialization import lecun_normal_init_parameters from espnet.nets.pytorch_backend.initialization", "transducer instance args (Namespace): argument Namespace containing options \"\"\" if", "set_forget_bias_to_one(model.dec.decoder[l].bias_ih) else: if args.etype == \"transformer\": initialize(model, args.transformer_init) else: lecun_normal_init_parameters(model.encoder)", "else: if args.etype == \"transformer\": initialize(model, args.transformer_init) else: lecun_normal_init_parameters(model.encoder) initialize(model.decoder,", "import lecun_normal_init_parameters from espnet.nets.pytorch_backend.initialization import set_forget_bias_to_one from espnet.nets.pytorch_backend.transformer.initializer import initialize", "model (torch.nn.Module): transducer instance args (Namespace): argument Namespace containing options", "# -*- coding: utf-8 -*- \"\"\"Parameter initialization for transducer RNN/Transformer", "for transducer RNN/Transformer parts.\"\"\" import six from espnet.nets.pytorch_backend.initialization import lecun_normal_init_parameters", "six from espnet.nets.pytorch_backend.initialization import lecun_normal_init_parameters from espnet.nets.pytorch_backend.initialization import set_forget_bias_to_one from", "def initializer(model, args): \"\"\"Initialize transducer model. 
Args: model (torch.nn.Module): transducer", "Args: model (torch.nn.Module): transducer instance args (Namespace): argument Namespace containing", "(torch.nn.Module): transducer instance args (Namespace): argument Namespace containing options \"\"\"", "args.transformer_init) lecun_normal_init_parameters(model.dec) else: lecun_normal_init_parameters(model) model.dec.embed.weight.data.normal_(0, 1) for l in six.moves.range(len(model.dec.decoder)):", "initializer(model, args): \"\"\"Initialize transducer model. Args: model (torch.nn.Module): transducer instance", "\"\"\"Parameter initialization for transducer RNN/Transformer parts.\"\"\" import six from espnet.nets.pytorch_backend.initialization", "model. Args: model (torch.nn.Module): transducer instance args (Namespace): argument Namespace", "\"transformer\": initialize(model.encoder, args.transformer_init) lecun_normal_init_parameters(model.dec) else: lecun_normal_init_parameters(model) model.dec.embed.weight.data.normal_(0, 1) for l", "initialization for transducer RNN/Transformer parts.\"\"\" import six from espnet.nets.pytorch_backend.initialization import", "containing options \"\"\" if args.dtype != \"transformer\": if args.etype ==", "else: lecun_normal_init_parameters(model) model.dec.embed.weight.data.normal_(0, 1) for l in six.moves.range(len(model.dec.decoder)): set_forget_bias_to_one(model.dec.decoder[l].bias_ih) else:", "initialize(model.encoder, args.transformer_init) lecun_normal_init_parameters(model.dec) else: lecun_normal_init_parameters(model) model.dec.embed.weight.data.normal_(0, 1) for l in", "lecun_normal_init_parameters(model) model.dec.embed.weight.data.normal_(0, 1) for l in six.moves.range(len(model.dec.decoder)): set_forget_bias_to_one(model.dec.decoder[l].bias_ih) else: if", "for l in six.moves.range(len(model.dec.decoder)): set_forget_bias_to_one(model.dec.decoder[l].bias_ih) else: if args.etype == \"transformer\":", "-*- coding: utf-8 -*- \"\"\"Parameter initialization for transducer RNN/Transformer parts.\"\"\"", "\"transformer\": if args.etype == \"transformer\": initialize(model.encoder, args.transformer_init) lecun_normal_init_parameters(model.dec) else: lecun_normal_init_parameters(model)", "from espnet.nets.pytorch_backend.initialization import lecun_normal_init_parameters from espnet.nets.pytorch_backend.initialization import set_forget_bias_to_one from espnet.nets.pytorch_backend.transformer.initializer", "#!/usr/bin/env python3 # -*- coding: utf-8 -*- \"\"\"Parameter initialization for", "python3 # -*- coding: utf-8 -*- \"\"\"Parameter initialization for transducer", "if args.etype == \"transformer\": initialize(model, args.transformer_init) else: lecun_normal_init_parameters(model.encoder) initialize(model.decoder, args.transformer_init)", "args.dtype != \"transformer\": if args.etype == \"transformer\": initialize(model.encoder, args.transformer_init) lecun_normal_init_parameters(model.dec)", "import six from espnet.nets.pytorch_backend.initialization import lecun_normal_init_parameters from espnet.nets.pytorch_backend.initialization import set_forget_bias_to_one", "utf-8 -*- \"\"\"Parameter initialization for transducer RNN/Transformer parts.\"\"\" import six", "set_forget_bias_to_one from espnet.nets.pytorch_backend.transformer.initializer import initialize def initializer(model, args): \"\"\"Initialize transducer", "import initialize def initializer(model, args): \"\"\"Initialize transducer model. 
Args: model", "lecun_normal_init_parameters from espnet.nets.pytorch_backend.initialization import set_forget_bias_to_one from espnet.nets.pytorch_backend.transformer.initializer import initialize def", "from espnet.nets.pytorch_backend.transformer.initializer import initialize def initializer(model, args): \"\"\"Initialize transducer model.", "!= \"transformer\": if args.etype == \"transformer\": initialize(model.encoder, args.transformer_init) lecun_normal_init_parameters(model.dec) else:", "espnet.nets.pytorch_backend.initialization import lecun_normal_init_parameters from espnet.nets.pytorch_backend.initialization import set_forget_bias_to_one from espnet.nets.pytorch_backend.transformer.initializer import", "1) for l in six.moves.range(len(model.dec.decoder)): set_forget_bias_to_one(model.dec.decoder[l].bias_ih) else: if args.etype ==", "args): \"\"\"Initialize transducer model. Args: model (torch.nn.Module): transducer instance args", "espnet.nets.pytorch_backend.transformer.initializer import initialize def initializer(model, args): \"\"\"Initialize transducer model. Args:", "<filename>espnet/nets/pytorch_backend/transducer/initializer.py<gh_stars>1-10 #!/usr/bin/env python3 # -*- coding: utf-8 -*- \"\"\"Parameter initialization", "-*- \"\"\"Parameter initialization for transducer RNN/Transformer parts.\"\"\" import six from", "\"\"\"Initialize transducer model. Args: model (torch.nn.Module): transducer instance args (Namespace):", "transducer RNN/Transformer parts.\"\"\" import six from espnet.nets.pytorch_backend.initialization import lecun_normal_init_parameters from", "parts.\"\"\" import six from espnet.nets.pytorch_backend.initialization import lecun_normal_init_parameters from espnet.nets.pytorch_backend.initialization import", "argument Namespace containing options \"\"\" if args.dtype != \"transformer\": if", "Namespace containing options \"\"\" if args.dtype != \"transformer\": if args.etype", "lecun_normal_init_parameters(model.dec) else: lecun_normal_init_parameters(model) model.dec.embed.weight.data.normal_(0, 1) for l in six.moves.range(len(model.dec.decoder)): set_forget_bias_to_one(model.dec.decoder[l].bias_ih)", "six.moves.range(len(model.dec.decoder)): set_forget_bias_to_one(model.dec.decoder[l].bias_ih) else: if args.etype == \"transformer\": initialize(model, args.transformer_init) else:", "import set_forget_bias_to_one from espnet.nets.pytorch_backend.transformer.initializer import initialize def initializer(model, args): \"\"\"Initialize", "args.etype == \"transformer\": initialize(model.encoder, args.transformer_init) lecun_normal_init_parameters(model.dec) else: lecun_normal_init_parameters(model) model.dec.embed.weight.data.normal_(0, 1)", "if args.etype == \"transformer\": initialize(model.encoder, args.transformer_init) lecun_normal_init_parameters(model.dec) else: lecun_normal_init_parameters(model) model.dec.embed.weight.data.normal_(0," ]
[ "np.multiply( P.dot( model.P1 ).dot( backward_likelihood(i+1, trace) ), model.M[:, trace[i][1]] )", "trace[i][1]] ) if likelihoods.sum() != 0: likelihoods = likelihoods /", "backward_likelihood(i, trace): N = model.N M = len( trace )", "= traces_factory.traces P0 = np.matrix(\"[ .02 0;\" \"0 0 0.5;\"", "= np.matrix(\"[0.1 0 0;\" \"0 0.5 0;\" \"0 0 0.9]\")", "P1 = np.matrix(\"[0.1 0 0;\" \"0 0.5 0;\" \"0 0", "= df.DatasetFactory() traces_factory.createDataset(T) traces = traces_factory.traces P0 = np.matrix(\"[ .02", "trace): N = model.N M = len( trace ) likelihoods", "model.uniformization_rate, trace[i][0]) # P = stored_p_values[i, :, :] likelihoods =", "np.ones((N, 1)) if i < M: P = main.randomization(P0, model.uniformization_rate,", "as np import datasetReader as df import main # Number", "), model.M[:, trace[i][1]] ) if likelihoods.sum() != 0: likelihoods =", ":, :] likelihoods = np.multiply( P.dot( model.P1 ).dot( backward_likelihood(i+1, trace)", "0;\" \"0 0 0.85]\") def backward_likelihood(i, trace): N = model.N", "likelihoods.sum() != 0: likelihoods = likelihoods / likelihoods.sum() return likelihoods", "main.randomization(P0, model.uniformization_rate, trace[i][0]) # P = stored_p_values[i, :, :] likelihoods", "\"0 0 0]\") P1 = np.matrix(\"[0.1 0 0;\" \"0 0.5", ") likelihoods = np.ones((N, 1)) if i < M: P", "0]\") P1 = np.matrix(\"[0.1 0 0;\" \"0 0.5 0;\" \"0", "trace) ), model.M[:, trace[i][1]] ) if likelihoods.sum() != 0: likelihoods", "# Number of traces loaded T T = 1 #", "P = main.randomization(P0, model.uniformization_rate, trace[i][0]) # P = stored_p_values[i, :,", ").dot( backward_likelihood(i+1, trace) ), model.M[:, trace[i][1]] ) if likelihoods.sum() !=", "# P = stored_p_values[i, :, :] likelihoods = np.multiply( P.dot(", "trace[i][0]) # P = stored_p_values[i, :, :] likelihoods = np.multiply(", "if likelihoods.sum() != 0: likelihoods = likelihoods / likelihoods.sum() return", "np.matrix(\"[0.1 0 0;\" \"0 0.5 0;\" \"0 0 0.9]\") M", "model.N M = len( trace ) likelihoods = np.ones((N, 1))", "stored_p_values[i, :, :] likelihoods = np.multiply( P.dot( model.P1 ).dot( backward_likelihood(i+1,", "T = 1 # Generate traces traces_factory = df.DatasetFactory() traces_factory.createDataset(T)", "def backward_likelihood(i, trace): N = model.N M = len( trace", "P0 = np.matrix(\"[ .02 0;\" \"0 0 0.5;\" \"0 0", "0 0]\") P1 = np.matrix(\"[0.1 0 0;\" \"0 0.5 0;\"", "= 1 # Generate traces traces_factory = df.DatasetFactory() traces_factory.createDataset(T) traces", "traces_factory.traces P0 = np.matrix(\"[ .02 0;\" \"0 0 0.5;\" \"0", "0 0;\" \"0 0.23 0;\" \"0 0 0.85]\") def backward_likelihood(i,", "model.M[:, trace[i][1]] ) if likelihoods.sum() != 0: likelihoods = likelihoods", "np import datasetReader as df import main # Number of", "traces_factory = df.DatasetFactory() traces_factory.createDataset(T) traces = traces_factory.traces P0 = np.matrix(\"[", "M: P = main.randomization(P0, model.uniformization_rate, trace[i][0]) # P = stored_p_values[i,", ":] likelihoods = np.multiply( P.dot( model.P1 ).dot( backward_likelihood(i+1, trace) ),", "0.9]\") M = np.matrix(\"[0.25 0 0;\" \"0 0.23 0;\" \"0", ") if likelihoods.sum() != 0: likelihoods = likelihoods / likelihoods.sum()", "main # Number of traces loaded T T = 1", "Number of traces loaded T T = 1 # Generate", "\"0 0 0.5;\" \"0 0 0]\") P1 = np.matrix(\"[0.1 0", "\"0 0 0.9]\") M = np.matrix(\"[0.25 0 0;\" \"0 0.23", "= stored_p_values[i, :, :] likelihoods = np.multiply( P.dot( model.P1 ).dot(", "0 0.9]\") M = np.matrix(\"[0.25 0 0;\" \"0 
0.23 0;\"", "= np.matrix(\"[0.25 0 0;\" \"0 0.23 0;\" \"0 0 0.85]\")", "traces_factory.createDataset(T) traces = traces_factory.traces P0 = np.matrix(\"[ .02 0;\" \"0", "0;\" \"0 0 0.5;\" \"0 0 0]\") P1 = np.matrix(\"[0.1", "< M: P = main.randomization(P0, model.uniformization_rate, trace[i][0]) # P =", "\"0 0 0.85]\") def backward_likelihood(i, trace): N = model.N M", "M = np.matrix(\"[0.25 0 0;\" \"0 0.23 0;\" \"0 0", "import numpy as np import datasetReader as df import main", "= np.ones((N, 1)) if i < M: P = main.randomization(P0,", "trace ) likelihoods = np.ones((N, 1)) if i < M:", "0 0.85]\") def backward_likelihood(i, trace): N = model.N M =", "Generate traces traces_factory = df.DatasetFactory() traces_factory.createDataset(T) traces = traces_factory.traces P0", "traces = traces_factory.traces P0 = np.matrix(\"[ .02 0;\" \"0 0", "np.matrix(\"[0.25 0 0;\" \"0 0.23 0;\" \"0 0 0.85]\") def", "P = stored_p_values[i, :, :] likelihoods = np.multiply( P.dot( model.P1", "np.matrix(\"[ .02 0;\" \"0 0 0.5;\" \"0 0 0]\") P1", "= len( trace ) likelihoods = np.ones((N, 1)) if i", "= np.matrix(\"[ .02 0;\" \"0 0 0.5;\" \"0 0 0]\")", "traces traces_factory = df.DatasetFactory() traces_factory.createDataset(T) traces = traces_factory.traces P0 =", "T T = 1 # Generate traces traces_factory = df.DatasetFactory()", "0.5 0;\" \"0 0 0.9]\") M = np.matrix(\"[0.25 0 0;\"", "0;\" \"0 0 0.9]\") M = np.matrix(\"[0.25 0 0;\" \"0", "df import main # Number of traces loaded T T", "likelihoods = np.multiply( P.dot( model.P1 ).dot( backward_likelihood(i+1, trace) ), model.M[:,", "M = len( trace ) likelihoods = np.ones((N, 1)) if", "as df import main # Number of traces loaded T", "import main # Number of traces loaded T T =", "i < M: P = main.randomization(P0, model.uniformization_rate, trace[i][0]) # P", "= main.randomization(P0, model.uniformization_rate, trace[i][0]) # P = stored_p_values[i, :, :]", "0.5;\" \"0 0 0]\") P1 = np.matrix(\"[0.1 0 0;\" \"0", "0;\" \"0 0.5 0;\" \"0 0 0.9]\") M = np.matrix(\"[0.25", "1 # Generate traces traces_factory = df.DatasetFactory() traces_factory.createDataset(T) traces =", "N = model.N M = len( trace ) likelihoods =", "0.23 0;\" \"0 0 0.85]\") def backward_likelihood(i, trace): N =", "datasetReader as df import main # Number of traces loaded", "loaded T T = 1 # Generate traces traces_factory =", "# Generate traces traces_factory = df.DatasetFactory() traces_factory.createDataset(T) traces = traces_factory.traces", "if i < M: P = main.randomization(P0, model.uniformization_rate, trace[i][0]) #", "of traces loaded T T = 1 # Generate traces", "\"0 0.23 0;\" \"0 0 0.85]\") def backward_likelihood(i, trace): N", "0 0;\" \"0 0.5 0;\" \"0 0 0.9]\") M =", "= model.N M = len( trace ) likelihoods = np.ones((N,", "0 0.5;\" \"0 0 0]\") P1 = np.matrix(\"[0.1 0 0;\"", "<filename>evaluate.py import model import numpy as np import datasetReader as", ".02 0;\" \"0 0 0.5;\" \"0 0 0]\") P1 =", "= np.multiply( P.dot( model.P1 ).dot( backward_likelihood(i+1, trace) ), model.M[:, trace[i][1]]", "df.DatasetFactory() traces_factory.createDataset(T) traces = traces_factory.traces P0 = np.matrix(\"[ .02 0;\"", "P.dot( model.P1 ).dot( backward_likelihood(i+1, trace) ), model.M[:, trace[i][1]] ) if", "traces loaded T T = 1 # Generate traces traces_factory", "len( trace ) likelihoods = np.ones((N, 1)) if i <", "likelihoods = np.ones((N, 1)) if i < M: P =", "0.85]\") def backward_likelihood(i, trace): N = model.N M = len(", "\"0 0.5 0;\" \"0 0 0.9]\") M = np.matrix(\"[0.25 0", "0;\" \"0 0.23 0;\" \"0 0 
0.85]\") def backward_likelihood(i, trace):", "import model import numpy as np import datasetReader as df", "import datasetReader as df import main # Number of traces", "1)) if i < M: P = main.randomization(P0, model.uniformization_rate, trace[i][0])", "backward_likelihood(i+1, trace) ), model.M[:, trace[i][1]] ) if likelihoods.sum() != 0:", "model.P1 ).dot( backward_likelihood(i+1, trace) ), model.M[:, trace[i][1]] ) if likelihoods.sum()", "numpy as np import datasetReader as df import main #", "model import numpy as np import datasetReader as df import" ]
[ "do not use this object directly. imageCanvas = vtk.vtkImageCanvasSource2D() imageCanvas.SetNumberOfScalarComponents(3)", "imageCanvas.FillBox(100,150,0,100) imageCanvas.SetDrawColor(0,128,128) imageCanvas.FillBox(150,200,0,100) imageCanvas.SetDrawColor(0,0,255) imageCanvas.FillBox(200,250,0,100) imageCanvas.SetDrawColor(128,0,128) imageCanvas.FillBox(250,300,0,100) # intensity scale", "using colors. # This is not a pipeline object. It", "directly. imageCanvas = vtk.vtkImageCanvasSource2D() imageCanvas.SetNumberOfScalarComponents(3) imageCanvas.SetScalarTypeToUnsignedChar() imageCanvas.SetExtent(0,320,0,320,0,0) imageCanvas.SetDrawColor(0,0,0) imageCanvas.FillBox(0,511,0,511) #", "will support pipeline objects. # Please do not use this", "imageCanvas.SetDrawColor(117,64,64) imageCanvas.FillBox(200,250,220,320) imageCanvas.SetDrawColor(85,80,80) imageCanvas.FillBox(250,300,220,320) convert = vtk.vtkImageRGBToHSV() convert.SetInputConnection(imageCanvas.GetOutputPort()) convertBack =", "GetOutputPort] viewer.SetColorWindow(256) viewer.SetColorLevel(127.5) viewer.SetSize(320,320) viewer.Render() # --- end of script", "pipeline objects. # Please do not use this object directly.", "intensity scale imageCanvas.SetDrawColor(5,5,5) imageCanvas.FillBox(0,50,110,210) imageCanvas.SetDrawColor(55,55,55) imageCanvas.FillBox(50,100,110,210) imageCanvas.SetDrawColor(105,105,105) imageCanvas.FillBox(100,150,110,210) imageCanvas.SetDrawColor(155,155,155) imageCanvas.FillBox(150,200,110,210)", "= vtk.vtkImageHSVToRGB() convertBack.SetInputConnection(convert.GetOutputPort()) cast = vtk.vtkImageCast() cast.SetInputConnection(convertBack.GetOutputPort()) cast.SetOutputScalarTypeToFloat() cast.ReleaseDataFlagOff() viewer", "imageCanvas.SetNumberOfScalarComponents(3) imageCanvas.SetScalarTypeToUnsignedChar() imageCanvas.SetExtent(0,320,0,320,0,0) imageCanvas.SetDrawColor(0,0,0) imageCanvas.FillBox(0,511,0,511) # r, g, b imageCanvas.SetDrawColor(255,0,0)", "imageCanvas.FillBox(150,200,0,100) imageCanvas.SetDrawColor(0,0,255) imageCanvas.FillBox(200,250,0,100) imageCanvas.SetDrawColor(128,0,128) imageCanvas.FillBox(250,300,0,100) # intensity scale imageCanvas.SetDrawColor(5,5,5) imageCanvas.FillBox(0,50,110,210)", "painter to draw using colors. # This is not a", "imageCanvas.FillBox(150,200,110,210) imageCanvas.SetDrawColor(205,205,205) imageCanvas.FillBox(200,250,110,210) imageCanvas.SetDrawColor(255,255,255) imageCanvas.FillBox(250,300,110,210) # saturation scale imageCanvas.SetDrawColor(245,0,0) imageCanvas.FillBox(0,50,220,320)", "= vtk.vtkImageCanvasSource2D() imageCanvas.SetNumberOfScalarComponents(3) imageCanvas.SetScalarTypeToUnsignedChar() imageCanvas.SetExtent(0,320,0,320,0,0) imageCanvas.SetDrawColor(0,0,0) imageCanvas.FillBox(0,511,0,511) # r, g,", "r, g, b imageCanvas.SetDrawColor(255,0,0) imageCanvas.FillBox(0,50,0,100) imageCanvas.SetDrawColor(128,128,0) imageCanvas.FillBox(50,100,0,100) imageCanvas.SetDrawColor(0,255,0) imageCanvas.FillBox(100,150,0,100) imageCanvas.SetDrawColor(0,128,128)", "# saturation scale imageCanvas.SetDrawColor(245,0,0) imageCanvas.FillBox(0,50,220,320) imageCanvas.SetDrawColor(213,16,16) imageCanvas.FillBox(50,100,220,320) imageCanvas.SetDrawColor(181,32,32) imageCanvas.FillBox(100,150,220,320) imageCanvas.SetDrawColor(149,48,48)", "pipeline object. It will support pipeline objects. 
# Please do", "#viewer SetInputConnection [imageCanvas GetOutputPort] viewer.SetColorWindow(256) viewer.SetColorLevel(127.5) viewer.SetSize(320,320) viewer.Render() # ---", "imageCanvas.SetDrawColor(213,16,16) imageCanvas.FillBox(50,100,220,320) imageCanvas.SetDrawColor(181,32,32) imageCanvas.FillBox(100,150,220,320) imageCanvas.SetDrawColor(149,48,48) imageCanvas.FillBox(150,200,220,320) imageCanvas.SetDrawColor(117,64,64) imageCanvas.FillBox(200,250,220,320) imageCanvas.SetDrawColor(85,80,80) imageCanvas.FillBox(250,300,220,320)", "cast.SetInputConnection(convertBack.GetOutputPort()) cast.SetOutputScalarTypeToFloat() cast.ReleaseDataFlagOff() viewer = vtk.vtkImageViewer() viewer.SetInputConnection(convertBack.GetOutputPort()) #viewer SetInputConnection [imageCanvas", "imageCanvas.FillBox(50,100,220,320) imageCanvas.SetDrawColor(181,32,32) imageCanvas.FillBox(100,150,220,320) imageCanvas.SetDrawColor(149,48,48) imageCanvas.FillBox(150,200,220,320) imageCanvas.SetDrawColor(117,64,64) imageCanvas.FillBox(200,250,220,320) imageCanvas.SetDrawColor(85,80,80) imageCanvas.FillBox(250,300,220,320) convert", "convert.SetInputConnection(imageCanvas.GetOutputPort()) convertBack = vtk.vtkImageHSVToRGB() convertBack.SetInputConnection(convert.GetOutputPort()) cast = vtk.vtkImageCast() cast.SetInputConnection(convertBack.GetOutputPort()) cast.SetOutputScalarTypeToFloat()", "imageCanvas.SetDrawColor(205,205,205) imageCanvas.FillBox(200,250,110,210) imageCanvas.SetDrawColor(255,255,255) imageCanvas.FillBox(250,300,110,210) # saturation scale imageCanvas.SetDrawColor(245,0,0) imageCanvas.FillBox(0,50,220,320) imageCanvas.SetDrawColor(213,16,16)", "imageCanvas.FillBox(0,511,0,511) # r, g, b imageCanvas.SetDrawColor(255,0,0) imageCanvas.FillBox(0,50,0,100) imageCanvas.SetDrawColor(128,128,0) imageCanvas.FillBox(50,100,0,100) imageCanvas.SetDrawColor(0,255,0)", "imageCanvas.SetDrawColor(155,155,155) imageCanvas.FillBox(150,200,110,210) imageCanvas.SetDrawColor(205,205,205) imageCanvas.FillBox(200,250,110,210) imageCanvas.SetDrawColor(255,255,255) imageCanvas.FillBox(250,300,110,210) # saturation scale imageCanvas.SetDrawColor(245,0,0)", "vtk.vtkImageHSVToRGB() convertBack.SetInputConnection(convert.GetOutputPort()) cast = vtk.vtkImageCast() cast.SetInputConnection(convertBack.GetOutputPort()) cast.SetOutputScalarTypeToFloat() cast.ReleaseDataFlagOff() viewer =", "object directly. imageCanvas = vtk.vtkImageCanvasSource2D() imageCanvas.SetNumberOfScalarComponents(3) imageCanvas.SetScalarTypeToUnsignedChar() imageCanvas.SetExtent(0,320,0,320,0,0) imageCanvas.SetDrawColor(0,0,0) imageCanvas.FillBox(0,511,0,511)", "colors. # This is not a pipeline object. It will", "imageCanvas.FillBox(250,300,110,210) # saturation scale imageCanvas.SetDrawColor(245,0,0) imageCanvas.FillBox(0,50,220,320) imageCanvas.SetDrawColor(213,16,16) imageCanvas.FillBox(50,100,220,320) imageCanvas.SetDrawColor(181,32,32) imageCanvas.FillBox(100,150,220,320)", "the painter to draw using colors. 
# This is not", "viewer.SetInputConnection(convertBack.GetOutputPort()) #viewer SetInputConnection [imageCanvas GetOutputPort] viewer.SetColorWindow(256) viewer.SetColorLevel(127.5) viewer.SetSize(320,320) viewer.Render() #", "imageCanvas.SetDrawColor(55,55,55) imageCanvas.FillBox(50,100,110,210) imageCanvas.SetDrawColor(105,105,105) imageCanvas.FillBox(100,150,110,210) imageCanvas.SetDrawColor(155,155,155) imageCanvas.FillBox(150,200,110,210) imageCanvas.SetDrawColor(205,205,205) imageCanvas.FillBox(200,250,110,210) imageCanvas.SetDrawColor(255,255,255) imageCanvas.FillBox(250,300,110,210)", "# Use the painter to draw using colors. # This", "imageCanvas.SetScalarTypeToUnsignedChar() imageCanvas.SetExtent(0,320,0,320,0,0) imageCanvas.SetDrawColor(0,0,0) imageCanvas.FillBox(0,511,0,511) # r, g, b imageCanvas.SetDrawColor(255,0,0) imageCanvas.FillBox(0,50,0,100)", "import vtk from vtk.util.misc import vtkGetDataRoot VTK_DATA_ROOT = vtkGetDataRoot() #", "vtkGetDataRoot() # Use the painter to draw using colors. #", "vtk.vtkImageRGBToHSV() convert.SetInputConnection(imageCanvas.GetOutputPort()) convertBack = vtk.vtkImageHSVToRGB() convertBack.SetInputConnection(convert.GetOutputPort()) cast = vtk.vtkImageCast() cast.SetInputConnection(convertBack.GetOutputPort())", "vtk.util.misc import vtkGetDataRoot VTK_DATA_ROOT = vtkGetDataRoot() # Use the painter", "imageCanvas.FillBox(250,300,0,100) # intensity scale imageCanvas.SetDrawColor(5,5,5) imageCanvas.FillBox(0,50,110,210) imageCanvas.SetDrawColor(55,55,55) imageCanvas.FillBox(50,100,110,210) imageCanvas.SetDrawColor(105,105,105) imageCanvas.FillBox(100,150,110,210)", "This is not a pipeline object. It will support pipeline", "python import vtk from vtk.util.misc import vtkGetDataRoot VTK_DATA_ROOT = vtkGetDataRoot()", "imageCanvas.FillBox(200,250,220,320) imageCanvas.SetDrawColor(85,80,80) imageCanvas.FillBox(250,300,220,320) convert = vtk.vtkImageRGBToHSV() convert.SetInputConnection(imageCanvas.GetOutputPort()) convertBack = vtk.vtkImageHSVToRGB()", "imageCanvas.FillBox(200,250,0,100) imageCanvas.SetDrawColor(128,0,128) imageCanvas.FillBox(250,300,0,100) # intensity scale imageCanvas.SetDrawColor(5,5,5) imageCanvas.FillBox(0,50,110,210) imageCanvas.SetDrawColor(55,55,55) imageCanvas.FillBox(50,100,110,210)", "cast.SetOutputScalarTypeToFloat() cast.ReleaseDataFlagOff() viewer = vtk.vtkImageViewer() viewer.SetInputConnection(convertBack.GetOutputPort()) #viewer SetInputConnection [imageCanvas GetOutputPort]", "Use the painter to draw using colors. 
# This is", "imageCanvas.SetDrawColor(85,80,80) imageCanvas.FillBox(250,300,220,320) convert = vtk.vtkImageRGBToHSV() convert.SetInputConnection(imageCanvas.GetOutputPort()) convertBack = vtk.vtkImageHSVToRGB() convertBack.SetInputConnection(convert.GetOutputPort())", "imageCanvas.SetDrawColor(255,0,0) imageCanvas.FillBox(0,50,0,100) imageCanvas.SetDrawColor(128,128,0) imageCanvas.FillBox(50,100,0,100) imageCanvas.SetDrawColor(0,255,0) imageCanvas.FillBox(100,150,0,100) imageCanvas.SetDrawColor(0,128,128) imageCanvas.FillBox(150,200,0,100) imageCanvas.SetDrawColor(0,0,255) imageCanvas.FillBox(200,250,0,100)", "VTK_DATA_ROOT = vtkGetDataRoot() # Use the painter to draw using", "SetInputConnection [imageCanvas GetOutputPort] viewer.SetColorWindow(256) viewer.SetColorLevel(127.5) viewer.SetSize(320,320) viewer.Render() # --- end", "vtk.vtkImageViewer() viewer.SetInputConnection(convertBack.GetOutputPort()) #viewer SetInputConnection [imageCanvas GetOutputPort] viewer.SetColorWindow(256) viewer.SetColorLevel(127.5) viewer.SetSize(320,320) viewer.Render()", "= vtk.vtkImageCast() cast.SetInputConnection(convertBack.GetOutputPort()) cast.SetOutputScalarTypeToFloat() cast.ReleaseDataFlagOff() viewer = vtk.vtkImageViewer() viewer.SetInputConnection(convertBack.GetOutputPort()) #viewer", "# intensity scale imageCanvas.SetDrawColor(5,5,5) imageCanvas.FillBox(0,50,110,210) imageCanvas.SetDrawColor(55,55,55) imageCanvas.FillBox(50,100,110,210) imageCanvas.SetDrawColor(105,105,105) imageCanvas.FillBox(100,150,110,210) imageCanvas.SetDrawColor(155,155,155)", "viewer.SetColorWindow(256) viewer.SetColorLevel(127.5) viewer.SetSize(320,320) viewer.Render() # --- end of script --", "scale imageCanvas.SetDrawColor(5,5,5) imageCanvas.FillBox(0,50,110,210) imageCanvas.SetDrawColor(55,55,55) imageCanvas.FillBox(50,100,110,210) imageCanvas.SetDrawColor(105,105,105) imageCanvas.FillBox(100,150,110,210) imageCanvas.SetDrawColor(155,155,155) imageCanvas.FillBox(150,200,110,210) imageCanvas.SetDrawColor(205,205,205)", "cast = vtk.vtkImageCast() cast.SetInputConnection(convertBack.GetOutputPort()) cast.SetOutputScalarTypeToFloat() cast.ReleaseDataFlagOff() viewer = vtk.vtkImageViewer() viewer.SetInputConnection(convertBack.GetOutputPort())", "to draw using colors. # This is not a pipeline", "imageCanvas.FillBox(0,50,220,320) imageCanvas.SetDrawColor(213,16,16) imageCanvas.FillBox(50,100,220,320) imageCanvas.SetDrawColor(181,32,32) imageCanvas.FillBox(100,150,220,320) imageCanvas.SetDrawColor(149,48,48) imageCanvas.FillBox(150,200,220,320) imageCanvas.SetDrawColor(117,64,64) imageCanvas.FillBox(200,250,220,320) imageCanvas.SetDrawColor(85,80,80)", "imageCanvas.SetDrawColor(245,0,0) imageCanvas.FillBox(0,50,220,320) imageCanvas.SetDrawColor(213,16,16) imageCanvas.FillBox(50,100,220,320) imageCanvas.SetDrawColor(181,32,32) imageCanvas.FillBox(100,150,220,320) imageCanvas.SetDrawColor(149,48,48) imageCanvas.FillBox(150,200,220,320) imageCanvas.SetDrawColor(117,64,64) imageCanvas.FillBox(200,250,220,320)", "not use this object directly. imageCanvas = vtk.vtkImageCanvasSource2D() imageCanvas.SetNumberOfScalarComponents(3) imageCanvas.SetScalarTypeToUnsignedChar()", "this object directly. 
imageCanvas = vtk.vtkImageCanvasSource2D() imageCanvas.SetNumberOfScalarComponents(3) imageCanvas.SetScalarTypeToUnsignedChar() imageCanvas.SetExtent(0,320,0,320,0,0) imageCanvas.SetDrawColor(0,0,0)", "imageCanvas.FillBox(250,300,220,320) convert = vtk.vtkImageRGBToHSV() convert.SetInputConnection(imageCanvas.GetOutputPort()) convertBack = vtk.vtkImageHSVToRGB() convertBack.SetInputConnection(convert.GetOutputPort()) cast", "Please do not use this object directly. imageCanvas = vtk.vtkImageCanvasSource2D()", "import vtkGetDataRoot VTK_DATA_ROOT = vtkGetDataRoot() # Use the painter to", "not a pipeline object. It will support pipeline objects. #", "imageCanvas.FillBox(50,100,110,210) imageCanvas.SetDrawColor(105,105,105) imageCanvas.FillBox(100,150,110,210) imageCanvas.SetDrawColor(155,155,155) imageCanvas.FillBox(150,200,110,210) imageCanvas.SetDrawColor(205,205,205) imageCanvas.FillBox(200,250,110,210) imageCanvas.SetDrawColor(255,255,255) imageCanvas.FillBox(250,300,110,210) #", "imageCanvas.SetExtent(0,320,0,320,0,0) imageCanvas.SetDrawColor(0,0,0) imageCanvas.FillBox(0,511,0,511) # r, g, b imageCanvas.SetDrawColor(255,0,0) imageCanvas.FillBox(0,50,0,100) imageCanvas.SetDrawColor(128,128,0)", "saturation scale imageCanvas.SetDrawColor(245,0,0) imageCanvas.FillBox(0,50,220,320) imageCanvas.SetDrawColor(213,16,16) imageCanvas.FillBox(50,100,220,320) imageCanvas.SetDrawColor(181,32,32) imageCanvas.FillBox(100,150,220,320) imageCanvas.SetDrawColor(149,48,48) imageCanvas.FillBox(150,200,220,320)", "= vtk.vtkImageViewer() viewer.SetInputConnection(convertBack.GetOutputPort()) #viewer SetInputConnection [imageCanvas GetOutputPort] viewer.SetColorWindow(256) viewer.SetColorLevel(127.5) viewer.SetSize(320,320)", "imageCanvas.SetDrawColor(5,5,5) imageCanvas.FillBox(0,50,110,210) imageCanvas.SetDrawColor(55,55,55) imageCanvas.FillBox(50,100,110,210) imageCanvas.SetDrawColor(105,105,105) imageCanvas.FillBox(100,150,110,210) imageCanvas.SetDrawColor(155,155,155) imageCanvas.FillBox(150,200,110,210) imageCanvas.SetDrawColor(205,205,205) imageCanvas.FillBox(200,250,110,210)", "# Please do not use this object directly. imageCanvas =", "object. It will support pipeline objects. # Please do not", "# r, g, b imageCanvas.SetDrawColor(255,0,0) imageCanvas.FillBox(0,50,0,100) imageCanvas.SetDrawColor(128,128,0) imageCanvas.FillBox(50,100,0,100) imageCanvas.SetDrawColor(0,255,0) imageCanvas.FillBox(100,150,0,100)", "vtk.vtkImageCanvasSource2D() imageCanvas.SetNumberOfScalarComponents(3) imageCanvas.SetScalarTypeToUnsignedChar() imageCanvas.SetExtent(0,320,0,320,0,0) imageCanvas.SetDrawColor(0,0,0) imageCanvas.FillBox(0,511,0,511) # r, g, b", "objects. # Please do not use this object directly. imageCanvas", "imageCanvas.SetDrawColor(255,255,255) imageCanvas.FillBox(250,300,110,210) # saturation scale imageCanvas.SetDrawColor(245,0,0) imageCanvas.FillBox(0,50,220,320) imageCanvas.SetDrawColor(213,16,16) imageCanvas.FillBox(50,100,220,320) imageCanvas.SetDrawColor(181,32,32)", "vtk from vtk.util.misc import vtkGetDataRoot VTK_DATA_ROOT = vtkGetDataRoot() # Use", "# This is not a pipeline object. 
It will support", "imageCanvas.SetDrawColor(0,128,128) imageCanvas.FillBox(150,200,0,100) imageCanvas.SetDrawColor(0,0,255) imageCanvas.FillBox(200,250,0,100) imageCanvas.SetDrawColor(128,0,128) imageCanvas.FillBox(250,300,0,100) # intensity scale imageCanvas.SetDrawColor(5,5,5)", "imageCanvas.SetDrawColor(181,32,32) imageCanvas.FillBox(100,150,220,320) imageCanvas.SetDrawColor(149,48,48) imageCanvas.FillBox(150,200,220,320) imageCanvas.SetDrawColor(117,64,64) imageCanvas.FillBox(200,250,220,320) imageCanvas.SetDrawColor(85,80,80) imageCanvas.FillBox(250,300,220,320) convert =", "imageCanvas.FillBox(100,150,110,210) imageCanvas.SetDrawColor(155,155,155) imageCanvas.FillBox(150,200,110,210) imageCanvas.SetDrawColor(205,205,205) imageCanvas.FillBox(200,250,110,210) imageCanvas.SetDrawColor(255,255,255) imageCanvas.FillBox(250,300,110,210) # saturation scale", "convertBack.SetInputConnection(convert.GetOutputPort()) cast = vtk.vtkImageCast() cast.SetInputConnection(convertBack.GetOutputPort()) cast.SetOutputScalarTypeToFloat() cast.ReleaseDataFlagOff() viewer = vtk.vtkImageViewer()", "[imageCanvas GetOutputPort] viewer.SetColorWindow(256) viewer.SetColorLevel(127.5) viewer.SetSize(320,320) viewer.Render() # --- end of", "imageCanvas.FillBox(150,200,220,320) imageCanvas.SetDrawColor(117,64,64) imageCanvas.FillBox(200,250,220,320) imageCanvas.SetDrawColor(85,80,80) imageCanvas.FillBox(250,300,220,320) convert = vtk.vtkImageRGBToHSV() convert.SetInputConnection(imageCanvas.GetOutputPort()) convertBack", "draw using colors. # This is not a pipeline object.", "scale imageCanvas.SetDrawColor(245,0,0) imageCanvas.FillBox(0,50,220,320) imageCanvas.SetDrawColor(213,16,16) imageCanvas.FillBox(50,100,220,320) imageCanvas.SetDrawColor(181,32,32) imageCanvas.FillBox(100,150,220,320) imageCanvas.SetDrawColor(149,48,48) imageCanvas.FillBox(150,200,220,320) imageCanvas.SetDrawColor(117,64,64)", "= vtkGetDataRoot() # Use the painter to draw using colors.", "viewer = vtk.vtkImageViewer() viewer.SetInputConnection(convertBack.GetOutputPort()) #viewer SetInputConnection [imageCanvas GetOutputPort] viewer.SetColorWindow(256) viewer.SetColorLevel(127.5)", "imageCanvas = vtk.vtkImageCanvasSource2D() imageCanvas.SetNumberOfScalarComponents(3) imageCanvas.SetScalarTypeToUnsignedChar() imageCanvas.SetExtent(0,320,0,320,0,0) imageCanvas.SetDrawColor(0,0,0) imageCanvas.FillBox(0,511,0,511) # r,", "convert = vtk.vtkImageRGBToHSV() convert.SetInputConnection(imageCanvas.GetOutputPort()) convertBack = vtk.vtkImageHSVToRGB() convertBack.SetInputConnection(convert.GetOutputPort()) cast =", "imageCanvas.SetDrawColor(149,48,48) imageCanvas.FillBox(150,200,220,320) imageCanvas.SetDrawColor(117,64,64) imageCanvas.FillBox(200,250,220,320) imageCanvas.SetDrawColor(85,80,80) imageCanvas.FillBox(250,300,220,320) convert = vtk.vtkImageRGBToHSV() convert.SetInputConnection(imageCanvas.GetOutputPort())", "convertBack = vtk.vtkImageHSVToRGB() convertBack.SetInputConnection(convert.GetOutputPort()) cast = vtk.vtkImageCast() cast.SetInputConnection(convertBack.GetOutputPort()) cast.SetOutputScalarTypeToFloat() cast.ReleaseDataFlagOff()", "It will support pipeline objects. 
# Please do not use", "imageCanvas.SetDrawColor(0,255,0) imageCanvas.FillBox(100,150,0,100) imageCanvas.SetDrawColor(0,128,128) imageCanvas.FillBox(150,200,0,100) imageCanvas.SetDrawColor(0,0,255) imageCanvas.FillBox(200,250,0,100) imageCanvas.SetDrawColor(128,0,128) imageCanvas.FillBox(250,300,0,100) # intensity", "g, b imageCanvas.SetDrawColor(255,0,0) imageCanvas.FillBox(0,50,0,100) imageCanvas.SetDrawColor(128,128,0) imageCanvas.FillBox(50,100,0,100) imageCanvas.SetDrawColor(0,255,0) imageCanvas.FillBox(100,150,0,100) imageCanvas.SetDrawColor(0,128,128) imageCanvas.FillBox(150,200,0,100)", "b imageCanvas.SetDrawColor(255,0,0) imageCanvas.FillBox(0,50,0,100) imageCanvas.SetDrawColor(128,128,0) imageCanvas.FillBox(50,100,0,100) imageCanvas.SetDrawColor(0,255,0) imageCanvas.FillBox(100,150,0,100) imageCanvas.SetDrawColor(0,128,128) imageCanvas.FillBox(150,200,0,100) imageCanvas.SetDrawColor(0,0,255)", "imageCanvas.SetDrawColor(128,0,128) imageCanvas.FillBox(250,300,0,100) # intensity scale imageCanvas.SetDrawColor(5,5,5) imageCanvas.FillBox(0,50,110,210) imageCanvas.SetDrawColor(55,55,55) imageCanvas.FillBox(50,100,110,210) imageCanvas.SetDrawColor(105,105,105)", "imageCanvas.FillBox(200,250,110,210) imageCanvas.SetDrawColor(255,255,255) imageCanvas.FillBox(250,300,110,210) # saturation scale imageCanvas.SetDrawColor(245,0,0) imageCanvas.FillBox(0,50,220,320) imageCanvas.SetDrawColor(213,16,16) imageCanvas.FillBox(50,100,220,320)", "vtk.vtkImageCast() cast.SetInputConnection(convertBack.GetOutputPort()) cast.SetOutputScalarTypeToFloat() cast.ReleaseDataFlagOff() viewer = vtk.vtkImageViewer() viewer.SetInputConnection(convertBack.GetOutputPort()) #viewer SetInputConnection", "imageCanvas.FillBox(0,50,0,100) imageCanvas.SetDrawColor(128,128,0) imageCanvas.FillBox(50,100,0,100) imageCanvas.SetDrawColor(0,255,0) imageCanvas.FillBox(100,150,0,100) imageCanvas.SetDrawColor(0,128,128) imageCanvas.FillBox(150,200,0,100) imageCanvas.SetDrawColor(0,0,255) imageCanvas.FillBox(200,250,0,100) imageCanvas.SetDrawColor(128,0,128)", "imageCanvas.FillBox(50,100,0,100) imageCanvas.SetDrawColor(0,255,0) imageCanvas.FillBox(100,150,0,100) imageCanvas.SetDrawColor(0,128,128) imageCanvas.FillBox(150,200,0,100) imageCanvas.SetDrawColor(0,0,255) imageCanvas.FillBox(200,250,0,100) imageCanvas.SetDrawColor(128,0,128) imageCanvas.FillBox(250,300,0,100) #", "imageCanvas.SetDrawColor(105,105,105) imageCanvas.FillBox(100,150,110,210) imageCanvas.SetDrawColor(155,155,155) imageCanvas.FillBox(150,200,110,210) imageCanvas.SetDrawColor(205,205,205) imageCanvas.FillBox(200,250,110,210) imageCanvas.SetDrawColor(255,255,255) imageCanvas.FillBox(250,300,110,210) # saturation", "#!/usr/bin/env python import vtk from vtk.util.misc import vtkGetDataRoot VTK_DATA_ROOT =", "support pipeline objects. # Please do not use this object", "vtkGetDataRoot VTK_DATA_ROOT = vtkGetDataRoot() # Use the painter to draw", "= vtk.vtkImageRGBToHSV() convert.SetInputConnection(imageCanvas.GetOutputPort()) convertBack = vtk.vtkImageHSVToRGB() convertBack.SetInputConnection(convert.GetOutputPort()) cast = vtk.vtkImageCast()", "is not a pipeline object. 
It will support pipeline objects.", "imageCanvas.SetDrawColor(128,128,0) imageCanvas.FillBox(50,100,0,100) imageCanvas.SetDrawColor(0,255,0) imageCanvas.FillBox(100,150,0,100) imageCanvas.SetDrawColor(0,128,128) imageCanvas.FillBox(150,200,0,100) imageCanvas.SetDrawColor(0,0,255) imageCanvas.FillBox(200,250,0,100) imageCanvas.SetDrawColor(128,0,128) imageCanvas.FillBox(250,300,0,100)", "imageCanvas.SetDrawColor(0,0,255) imageCanvas.FillBox(200,250,0,100) imageCanvas.SetDrawColor(128,0,128) imageCanvas.FillBox(250,300,0,100) # intensity scale imageCanvas.SetDrawColor(5,5,5) imageCanvas.FillBox(0,50,110,210) imageCanvas.SetDrawColor(55,55,55)", "a pipeline object. It will support pipeline objects. # Please", "imageCanvas.SetDrawColor(0,0,0) imageCanvas.FillBox(0,511,0,511) # r, g, b imageCanvas.SetDrawColor(255,0,0) imageCanvas.FillBox(0,50,0,100) imageCanvas.SetDrawColor(128,128,0) imageCanvas.FillBox(50,100,0,100)", "from vtk.util.misc import vtkGetDataRoot VTK_DATA_ROOT = vtkGetDataRoot() # Use the", "cast.ReleaseDataFlagOff() viewer = vtk.vtkImageViewer() viewer.SetInputConnection(convertBack.GetOutputPort()) #viewer SetInputConnection [imageCanvas GetOutputPort] viewer.SetColorWindow(256)", "imageCanvas.FillBox(100,150,220,320) imageCanvas.SetDrawColor(149,48,48) imageCanvas.FillBox(150,200,220,320) imageCanvas.SetDrawColor(117,64,64) imageCanvas.FillBox(200,250,220,320) imageCanvas.SetDrawColor(85,80,80) imageCanvas.FillBox(250,300,220,320) convert = vtk.vtkImageRGBToHSV()", "use this object directly. imageCanvas = vtk.vtkImageCanvasSource2D() imageCanvas.SetNumberOfScalarComponents(3) imageCanvas.SetScalarTypeToUnsignedChar() imageCanvas.SetExtent(0,320,0,320,0,0)", "imageCanvas.FillBox(0,50,110,210) imageCanvas.SetDrawColor(55,55,55) imageCanvas.FillBox(50,100,110,210) imageCanvas.SetDrawColor(105,105,105) imageCanvas.FillBox(100,150,110,210) imageCanvas.SetDrawColor(155,155,155) imageCanvas.FillBox(150,200,110,210) imageCanvas.SetDrawColor(205,205,205) imageCanvas.FillBox(200,250,110,210) imageCanvas.SetDrawColor(255,255,255)" ]
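# As recovered, the script ends after a single Render() call, so the window
# may close immediately when run non-interactively. A minimal sketch (an
# addition, not part of the original script) that keeps the viewer open:
iren = vtk.vtkRenderWindowInteractor()
viewer.SetupInteractor(iren)  # attach the interactor to the viewer's window
viewer.Render()
iren.Start()                  # block until the window is closed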
[ "with open('kelas_2b/echa.csv', 'r') as csvfile: csv_reader = csv.reader(csvfile, delimiter=',') for", "= csv.reader(csvfile, delimiter=',') for row in csv_reader: print(\"menampilkan data barang:\",", "for row in csv_reader: print(\"menampilkan data barang:\", row[0], row[1], row[2],", "import csv class echa: def werehousing(self): with open('kelas_2b/echa.csv', 'r') as", "as csvfile: csv_reader = csv.reader(csvfile, delimiter=',') for row in csv_reader:", "class echa: def werehousing(self): with open('kelas_2b/echa.csv', 'r') as csvfile: csv_reader", "<reponame>barizraihan/belajarpython import csv class echa: def werehousing(self): with open('kelas_2b/echa.csv', 'r')", "csv class echa: def werehousing(self): with open('kelas_2b/echa.csv', 'r') as csvfile:", "delimiter=',') for row in csv_reader: print(\"menampilkan data barang:\", row[0], row[1],", "echa: def werehousing(self): with open('kelas_2b/echa.csv', 'r') as csvfile: csv_reader =", "'r') as csvfile: csv_reader = csv.reader(csvfile, delimiter=',') for row in", "werehousing(self): with open('kelas_2b/echa.csv', 'r') as csvfile: csv_reader = csv.reader(csvfile, delimiter=',')", "csv_reader = csv.reader(csvfile, delimiter=',') for row in csv_reader: print(\"menampilkan data", "row in csv_reader: print(\"menampilkan data barang:\", row[0], row[1], row[2], row[3],", "open('kelas_2b/echa.csv', 'r') as csvfile: csv_reader = csv.reader(csvfile, delimiter=',') for row", "csvfile: csv_reader = csv.reader(csvfile, delimiter=',') for row in csv_reader: print(\"menampilkan", "in csv_reader: print(\"menampilkan data barang:\", row[0], row[1], row[2], row[3], row[4])", "def werehousing(self): with open('kelas_2b/echa.csv', 'r') as csvfile: csv_reader = csv.reader(csvfile,", "csv.reader(csvfile, delimiter=',') for row in csv_reader: print(\"menampilkan data barang:\", row[0]," ]
[ "im_shape[0] - centre[0], -centre[1] : im_shape[1] - centre[1], -centre[2] :", "2.0 (the \"License\"); # you may not use this file", "import unittest from typing import Tuple import numpy as np", "(99, 99, 99) ) -> np.ndarray: \"\"\" Return a 3D", "[torch.Tensor(create_spherical_seg_3d(radius=20, centre=(10, 20, 20))).unsqueeze(0)] sampler_sphere_zeros = torch.zeros_like(sampler_sphere) TEST_SAMPLE_1 = [sampler_sphere,", "def _val_func(engine, batch): pass engine = Engine(_val_func) sur_metric.attach(engine, \"surface_distance\") y_pred,", "create See also: :py:meth:`~create_test_image_3d` \"\"\" # Create image image =", "radius: float = 20.0, centre: Tuple[int, int, int] = (49,", "# TODO test multi node Surface Distance def test_compute(self): sur_metric", "will be 1 inside the sphere, and 0 elsewhere. Args:", "= [sampler_sphere, sampler_sphere_gt] TEST_SAMPLE_2 = [sampler_sphere_gt, sampler_sphere_gt] TEST_SAMPLE_3 = [sampler_sphere_zeros,", "SurfaceDistance(include_background=True) with self.assertRaises((AssertionError, ValueError)): y_pred = TEST_SAMPLE_1[0] y = torch.ones((1,", "20.0, centre: Tuple[int, int, int] = (49, 49, 49), im_shape:", "of voxels, can be partial) centre: location of sphere centre.", "Tuple[int, int, int] = (49, 49, 49), im_shape: Tuple[int, int,", "= (spx * spx + spy * spy + spz", "import torch from ignite.engine import Engine from monai.handlers import SurfaceDistance", "y = torch.ones((1, 1, 10, 10, 10)) sur_metric.update([y_pred, y]) if", "use this file except in compliance with the License. #", "of number of voxels, can be partial) centre: location of", "import numpy as np import torch from ignite.engine import Engine", "sampler_sphere_gt] TEST_SAMPLE_2 = [sampler_sphere_gt, sampler_sphere_gt] TEST_SAMPLE_3 = [sampler_sphere_zeros, sampler_sphere_gt] TEST_SAMPLE_4", "1, 10, 10, 10)) sur_metric.update([y_pred, y]) if __name__ == \"__main__\":", "\"\"\" # Create image image = np.zeros(im_shape, dtype=np.int32) spy, spx,", "a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless", "Args: radius: radius of sphere (in terms of number of", "20))).unsqueeze(0).unsqueeze(0) # test input a list of channel-first tensor sampler_sphere_gt", "ignite.engine import Engine from monai.handlers import SurfaceDistance def create_spherical_seg_3d( radius:", "License. # You may obtain a copy of the License", "Create image image = np.zeros(im_shape, dtype=np.int32) spy, spx, spz =", "y]) self.assertAlmostEqual(sur_metric.compute(), float(\"inf\")) y_pred, y = TEST_SAMPLE_4 sur_metric.update([y_pred, y]) self.assertAlmostEqual(sur_metric.compute(),", "sampler_sphere_zeros = torch.zeros_like(sampler_sphere) TEST_SAMPLE_1 = [sampler_sphere, sampler_sphere_gt] TEST_SAMPLE_2 = [sampler_sphere_gt,", "places=4) y_pred, y = TEST_SAMPLE_3 sur_metric.update([y_pred, y]) self.assertAlmostEqual(sur_metric.compute(), float(\"inf\")) y_pred,", "be 1 inside the sphere, and 0 elsewhere. Args: radius:", "under the License is distributed on an \"AS IS\" BASIS,", "License for the specific language governing permissions and # limitations", "radius: radius of sphere (in terms of number of voxels,", "self.assertAlmostEqual(sur_metric.compute(), float(\"inf\")) def test_shape_mismatch(self): sur_metric = SurfaceDistance(include_background=True) with self.assertRaises((AssertionError, ValueError)):", "inside the sphere, and 0 elsewhere. 
Args: radius: radius of", "spx + spy * spy + spz * spz) <=", "-centre[2] : im_shape[2] - centre[2] ] circle = (spx *", "at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or", "def test_shape_mismatch(self): sur_metric = SurfaceDistance(include_background=True) with self.assertRaises((AssertionError, ValueError)): y_pred =", "Surface Distance def test_compute(self): sur_metric = SurfaceDistance(include_background=True) def _val_func(engine, batch):", "TEST_SAMPLE_2 sur_metric.update([y_pred, y]) self.assertAlmostEqual(sur_metric.compute(), 2.08566, places=4) y_pred, y = TEST_SAMPLE_3", "values will be 1 inside the sphere, and 0 elsewhere.", "int, int] = (49, 49, 49), im_shape: Tuple[int, int, int]", "20, 20))).unsqueeze(0).unsqueeze(0) # test input a list of channel-first tensor", "a list of channel-first tensor sampler_sphere_gt = [torch.Tensor(create_spherical_seg_3d(radius=20, centre=(10, 20,", "sur_metric.update([y_pred, y]) self.assertAlmostEqual(sur_metric.compute(), float(\"inf\")) def test_shape_mismatch(self): sur_metric = SurfaceDistance(include_background=True) with", "in compliance with the License. # You may obtain a", "centre: Tuple[int, int, int] = (49, 49, 49), im_shape: Tuple[int,", "software # distributed under the License is distributed on an", "from typing import Tuple import numpy as np import torch", "import Engine from monai.handlers import SurfaceDistance def create_spherical_seg_3d( radius: float", "y_pred, y = TEST_SAMPLE_1 sur_metric.update([y_pred, y]) self.assertAlmostEqual(sur_metric.compute(), 4.17133, places=4) y_pred,", "99, 99) ) -> np.ndarray: \"\"\" Return a 3D image", "sur_metric = SurfaceDistance(include_background=True) with self.assertRaises((AssertionError, ValueError)): y_pred = TEST_SAMPLE_1[0] y", "Copyright 2020 - 2021 MONAI Consortium # Licensed under the", "y_pred = TEST_SAMPLE_1[0] y = torch.ones((1, 1, 10, 10, 10))", "self.assertAlmostEqual(sur_metric.compute(), 2.08566, places=4) y_pred, y = TEST_SAMPLE_3 sur_metric.update([y_pred, y]) self.assertAlmostEqual(sur_metric.compute(),", "np.ndarray: \"\"\" Return a 3D image with a sphere inside.", "sampler_sphere_zeros] class TestHandlerSurfaceDistance(unittest.TestCase): # TODO test multi node Surface Distance", "2.08566, places=4) y_pred, y = TEST_SAMPLE_3 sur_metric.update([y_pred, y]) self.assertAlmostEqual(sur_metric.compute(), float(\"inf\"))", "shape of image to create See also: :py:meth:`~create_test_image_3d` \"\"\" #", "Distance def test_compute(self): sur_metric = SurfaceDistance(include_background=True) def _val_func(engine, batch): pass", "- centre[1], -centre[2] : im_shape[2] - centre[2] ] circle =", "import SurfaceDistance def create_spherical_seg_3d( radius: float = 20.0, centre: Tuple[int,", "99) ) -> np.ndarray: \"\"\" Return a 3D image with", "a 3D image with a sphere inside. Voxel values will", "places=4) y_pred, y = TEST_SAMPLE_2 sur_metric.update([y_pred, y]) self.assertAlmostEqual(sur_metric.compute(), 2.08566, places=4)", "test_shape_mismatch(self): sur_metric = SurfaceDistance(include_background=True) with self.assertRaises((AssertionError, ValueError)): y_pred = TEST_SAMPLE_1[0]", "im_shape[2] - centre[2] ] circle = (spx * spx +", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "= 1 image[~circle] = 0 return image sampler_sphere = torch.Tensor(create_spherical_seg_3d(radius=20,", "ANY KIND, either express or implied. 
# See the License", "See the License for the specific language governing permissions and", "sphere centre. im_shape: shape of image to create See also:", "the License. # You may obtain a copy of the", "for the specific language governing permissions and # limitations under", "TEST_SAMPLE_4 sur_metric.update([y_pred, y]) self.assertAlmostEqual(sur_metric.compute(), float(\"inf\")) def test_shape_mismatch(self): sur_metric = SurfaceDistance(include_background=True)", "also: :py:meth:`~create_test_image_3d` \"\"\" # Create image image = np.zeros(im_shape, dtype=np.int32)", "to in writing, software # distributed under the License is", "# test input a list of channel-first tensor sampler_sphere_gt =", "# See the License for the specific language governing permissions", "def create_spherical_seg_3d( radius: float = 20.0, centre: Tuple[int, int, int]", "input a list of channel-first tensor sampler_sphere_gt = [torch.Tensor(create_spherical_seg_3d(radius=20, centre=(10,", "TEST_SAMPLE_4 = [sampler_sphere_zeros, sampler_sphere_zeros] class TestHandlerSurfaceDistance(unittest.TestCase): # TODO test multi", "MONAI Consortium # Licensed under the Apache License, Version 2.0", "or agreed to in writing, software # distributed under the", "required by applicable law or agreed to in writing, software", "- centre[0], -centre[1] : im_shape[1] - centre[1], -centre[2] : im_shape[2]", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "spx, spz = np.ogrid[ -centre[0] : im_shape[0] - centre[0], -centre[1]", "with the License. # You may obtain a copy of", "y = TEST_SAMPLE_3 sur_metric.update([y_pred, y]) self.assertAlmostEqual(sur_metric.compute(), float(\"inf\")) y_pred, y =", "= (99, 99, 99) ) -> np.ndarray: \"\"\" Return a", "self.assertAlmostEqual(sur_metric.compute(), 4.17133, places=4) y_pred, y = TEST_SAMPLE_2 sur_metric.update([y_pred, y]) self.assertAlmostEqual(sur_metric.compute(),", "and 0 elsewhere. Args: radius: radius of sphere (in terms", "Tuple import numpy as np import torch from ignite.engine import", "= (49, 49, 49), im_shape: Tuple[int, int, int] = (99,", ") -> np.ndarray: \"\"\" Return a 3D image with a", "spy + spz * spz) <= radius * radius image[circle]", "spz = np.ogrid[ -centre[0] : im_shape[0] - centre[0], -centre[1] :", "float(\"inf\")) def test_shape_mismatch(self): sur_metric = SurfaceDistance(include_background=True) with self.assertRaises((AssertionError, ValueError)): y_pred", "TODO test multi node Surface Distance def test_compute(self): sur_metric =", "compliance with the License. # You may obtain a copy", "agreed to in writing, software # distributed under the License", "distributed under the License is distributed on an \"AS IS\"", "centre. im_shape: shape of image to create See also: :py:meth:`~create_test_image_3d`", "y_pred, y = TEST_SAMPLE_3 sur_metric.update([y_pred, y]) self.assertAlmostEqual(sur_metric.compute(), float(\"inf\")) y_pred, y", "= TEST_SAMPLE_3 sur_metric.update([y_pred, y]) self.assertAlmostEqual(sur_metric.compute(), float(\"inf\")) y_pred, y = TEST_SAMPLE_4", "Tuple[int, int, int] = (99, 99, 99) ) -> np.ndarray:", "express or implied. # See the License for the specific", "except in compliance with the License. 
# You may obtain", "import Tuple import numpy as np import torch from ignite.engine", "im_shape: Tuple[int, int, int] = (99, 99, 99) ) ->", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "image = np.zeros(im_shape, dtype=np.int32) spy, spx, spz = np.ogrid[ -centre[0]", "not use this file except in compliance with the License.", "= np.zeros(im_shape, dtype=np.int32) spy, spx, spz = np.ogrid[ -centre[0] :", "writing, software # distributed under the License is distributed on", "License. import unittest from typing import Tuple import numpy as", "you may not use this file except in compliance with", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "from monai.handlers import SurfaceDistance def create_spherical_seg_3d( radius: float = 20.0,", "# Create image image = np.zeros(im_shape, dtype=np.int32) spy, spx, spz", "int, int] = (99, 99, 99) ) -> np.ndarray: \"\"\"", "image to create See also: :py:meth:`~create_test_image_3d` \"\"\" # Create image", "of channel-first tensor sampler_sphere_gt = [torch.Tensor(create_spherical_seg_3d(radius=20, centre=(10, 20, 20))).unsqueeze(0)] sampler_sphere_zeros", "CONDITIONS OF ANY KIND, either express or implied. # See", "test_compute(self): sur_metric = SurfaceDistance(include_background=True) def _val_func(engine, batch): pass engine =", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "Consortium # Licensed under the Apache License, Version 2.0 (the", "TestHandlerSurfaceDistance(unittest.TestCase): # TODO test multi node Surface Distance def test_compute(self):", "http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to", "governing permissions and # limitations under the License. import unittest", "sur_metric.update([y_pred, y]) self.assertAlmostEqual(sur_metric.compute(), 2.08566, places=4) y_pred, y = TEST_SAMPLE_3 sur_metric.update([y_pred,", "engine = Engine(_val_func) sur_metric.attach(engine, \"surface_distance\") y_pred, y = TEST_SAMPLE_1 sur_metric.update([y_pred,", "image image = np.zeros(im_shape, dtype=np.int32) spy, spx, spz = np.ogrid[", "and # limitations under the License. import unittest from typing", "SurfaceDistance(include_background=True) def _val_func(engine, batch): pass engine = Engine(_val_func) sur_metric.attach(engine, \"surface_distance\")", "= TEST_SAMPLE_2 sur_metric.update([y_pred, y]) self.assertAlmostEqual(sur_metric.compute(), 2.08566, places=4) y_pred, y =", "Engine(_val_func) sur_metric.attach(engine, \"surface_distance\") y_pred, y = TEST_SAMPLE_1 sur_metric.update([y_pred, y]) self.assertAlmostEqual(sur_metric.compute(),", "OR CONDITIONS OF ANY KIND, either express or implied. #", "spy, spx, spz = np.ogrid[ -centre[0] : im_shape[0] - centre[0],", "int] = (99, 99, 99) ) -> np.ndarray: \"\"\" Return", "ValueError)): y_pred = TEST_SAMPLE_1[0] y = torch.ones((1, 1, 10, 10,", "test multi node Surface Distance def test_compute(self): sur_metric = SurfaceDistance(include_background=True)", "the License is distributed on an \"AS IS\" BASIS, #", "elsewhere. Args: radius: radius of sphere (in terms of number", "= 20.0, centre: Tuple[int, int, int] = (49, 49, 49),", "radius * radius image[circle] = 1 image[~circle] = 0 return", "= [sampler_sphere_gt, sampler_sphere_gt] TEST_SAMPLE_3 = [sampler_sphere_zeros, sampler_sphere_gt] TEST_SAMPLE_4 = [sampler_sphere_zeros,", "be partial) centre: location of sphere centre. 
im_shape: shape of", "SurfaceDistance def create_spherical_seg_3d( radius: float = 20.0, centre: Tuple[int, int,", "= 0 return image sampler_sphere = torch.Tensor(create_spherical_seg_3d(radius=20, centre=(20, 20, 20))).unsqueeze(0).unsqueeze(0)", "y]) self.assertAlmostEqual(sur_metric.compute(), 4.17133, places=4) y_pred, y = TEST_SAMPLE_2 sur_metric.update([y_pred, y])", "np import torch from ignite.engine import Engine from monai.handlers import", "= SurfaceDistance(include_background=True) def _val_func(engine, batch): pass engine = Engine(_val_func) sur_metric.attach(engine,", "# limitations under the License. import unittest from typing import", "TEST_SAMPLE_2 = [sampler_sphere_gt, sampler_sphere_gt] TEST_SAMPLE_3 = [sampler_sphere_zeros, sampler_sphere_gt] TEST_SAMPLE_4 =", "law or agreed to in writing, software # distributed under", "49, 49), im_shape: Tuple[int, int, int] = (99, 99, 99)", "1 inside the sphere, and 0 elsewhere. Args: radius: radius", "sampler_sphere_gt] TEST_SAMPLE_3 = [sampler_sphere_zeros, sampler_sphere_gt] TEST_SAMPLE_4 = [sampler_sphere_zeros, sampler_sphere_zeros] class", "[sampler_sphere_gt, sampler_sphere_gt] TEST_SAMPLE_3 = [sampler_sphere_zeros, sampler_sphere_gt] TEST_SAMPLE_4 = [sampler_sphere_zeros, sampler_sphere_zeros]", "= Engine(_val_func) sur_metric.attach(engine, \"surface_distance\") y_pred, y = TEST_SAMPLE_1 sur_metric.update([y_pred, y])", "License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law", "Voxel values will be 1 inside the sphere, and 0", "# Copyright 2020 - 2021 MONAI Consortium # Licensed under", "of image to create See also: :py:meth:`~create_test_image_3d` \"\"\" # Create", "] circle = (spx * spx + spy * spy", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "circle = (spx * spx + spy * spy +", "y_pred, y = TEST_SAMPLE_4 sur_metric.update([y_pred, y]) self.assertAlmostEqual(sur_metric.compute(), float(\"inf\")) def test_shape_mismatch(self):", "\"\"\" Return a 3D image with a sphere inside. Voxel", "Engine from monai.handlers import SurfaceDistance def create_spherical_seg_3d( radius: float =", "may not use this file except in compliance with the", "torch.ones((1, 1, 10, 10, 10)) sur_metric.update([y_pred, y]) if __name__ ==", "= torch.ones((1, 1, 10, 10, 10)) sur_metric.update([y_pred, y]) if __name__", "[sampler_sphere_zeros, sampler_sphere_gt] TEST_SAMPLE_4 = [sampler_sphere_zeros, sampler_sphere_zeros] class TestHandlerSurfaceDistance(unittest.TestCase): # TODO", "image[~circle] = 0 return image sampler_sphere = torch.Tensor(create_spherical_seg_3d(radius=20, centre=(20, 20,", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "this file except in compliance with the License. # You", "10, 10, 10)) sur_metric.update([y_pred, y]) if __name__ == \"__main__\": unittest.main()", "spz * spz) <= radius * radius image[circle] = 1", "int] = (49, 49, 49), im_shape: Tuple[int, int, int] =", "radius image[circle] = 1 image[~circle] = 0 return image sampler_sphere", "= TEST_SAMPLE_1[0] y = torch.ones((1, 1, 10, 10, 10)) sur_metric.update([y_pred,", "of sphere (in terms of number of voxels, can be", "number of voxels, can be partial) centre: location of sphere", "= TEST_SAMPLE_4 sur_metric.update([y_pred, y]) self.assertAlmostEqual(sur_metric.compute(), float(\"inf\")) def test_shape_mismatch(self): sur_metric =", "file except in compliance with the License. 
# You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "create_spherical_seg_3d( radius: float = 20.0, centre: Tuple[int, int, int] =", "y = TEST_SAMPLE_2 sur_metric.update([y_pred, y]) self.assertAlmostEqual(sur_metric.compute(), 2.08566, places=4) y_pred, y", "sphere (in terms of number of voxels, can be partial)", "See also: :py:meth:`~create_test_image_3d` \"\"\" # Create image image = np.zeros(im_shape,", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "partial) centre: location of sphere centre. im_shape: shape of image", "[sampler_sphere, sampler_sphere_gt] TEST_SAMPLE_2 = [sampler_sphere_gt, sampler_sphere_gt] TEST_SAMPLE_3 = [sampler_sphere_zeros, sampler_sphere_gt]", "sampler_sphere = torch.Tensor(create_spherical_seg_3d(radius=20, centre=(20, 20, 20))).unsqueeze(0).unsqueeze(0) # test input a", "2020 - 2021 MONAI Consortium # Licensed under the Apache", "voxels, can be partial) centre: location of sphere centre. im_shape:", "np.ogrid[ -centre[0] : im_shape[0] - centre[0], -centre[1] : im_shape[1] -", "sphere inside. Voxel values will be 1 inside the sphere,", "-> np.ndarray: \"\"\" Return a 3D image with a sphere", "Return a 3D image with a sphere inside. Voxel values", "torch.zeros_like(sampler_sphere) TEST_SAMPLE_1 = [sampler_sphere, sampler_sphere_gt] TEST_SAMPLE_2 = [sampler_sphere_gt, sampler_sphere_gt] TEST_SAMPLE_3", ": im_shape[1] - centre[1], -centre[2] : im_shape[2] - centre[2] ]", "of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by", "spy * spy + spz * spz) <= radius *", "language governing permissions and # limitations under the License. import", "self.assertAlmostEqual(sur_metric.compute(), float(\"inf\")) y_pred, y = TEST_SAMPLE_4 sur_metric.update([y_pred, y]) self.assertAlmostEqual(sur_metric.compute(), float(\"inf\"))", "of sphere centre. im_shape: shape of image to create See", "centre: location of sphere centre. im_shape: shape of image to", "the License. import unittest from typing import Tuple import numpy", "- centre[2] ] circle = (spx * spx + spy", "channel-first tensor sampler_sphere_gt = [torch.Tensor(create_spherical_seg_3d(radius=20, centre=(10, 20, 20))).unsqueeze(0)] sampler_sphere_zeros =", "_val_func(engine, batch): pass engine = Engine(_val_func) sur_metric.attach(engine, \"surface_distance\") y_pred, y", "<= radius * radius image[circle] = 1 image[~circle] = 0", "torch from ignite.engine import Engine from monai.handlers import SurfaceDistance def", "+ spz * spz) <= radius * radius image[circle] =", "or implied. # See the License for the specific language", "def test_compute(self): sur_metric = SurfaceDistance(include_background=True) def _val_func(engine, batch): pass engine", "node Surface Distance def test_compute(self): sur_metric = SurfaceDistance(include_background=True) def _val_func(engine,", "radius of sphere (in terms of number of voxels, can", "KIND, either express or implied. 
# See the License for", "specific language governing permissions and # limitations under the License.", ":py:meth:`~create_test_image_3d` \"\"\" # Create image image = np.zeros(im_shape, dtype=np.int32) spy,", "49), im_shape: Tuple[int, int, int] = (99, 99, 99) )", "image sampler_sphere = torch.Tensor(create_spherical_seg_3d(radius=20, centre=(20, 20, 20))).unsqueeze(0).unsqueeze(0) # test input", "np.zeros(im_shape, dtype=np.int32) spy, spx, spz = np.ogrid[ -centre[0] : im_shape[0]", "* radius image[circle] = 1 image[~circle] = 0 return image", "TEST_SAMPLE_3 sur_metric.update([y_pred, y]) self.assertAlmostEqual(sur_metric.compute(), float(\"inf\")) y_pred, y = TEST_SAMPLE_4 sur_metric.update([y_pred,", "copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required", "(in terms of number of voxels, can be partial) centre:", "(the \"License\"); # you may not use this file except", "typing import Tuple import numpy as np import torch from", "a sphere inside. Voxel values will be 1 inside the", "# you may not use this file except in compliance", "centre[1], -centre[2] : im_shape[2] - centre[2] ] circle = (spx", "sphere, and 0 elsewhere. Args: radius: radius of sphere (in", "* spy + spz * spz) <= radius * radius", "sur_metric.update([y_pred, y]) self.assertAlmostEqual(sur_metric.compute(), float(\"inf\")) y_pred, y = TEST_SAMPLE_4 sur_metric.update([y_pred, y])", "with a sphere inside. Voxel values will be 1 inside", "to create See also: :py:meth:`~create_test_image_3d` \"\"\" # Create image image", "0 elsewhere. Args: radius: radius of sphere (in terms of", "y = TEST_SAMPLE_4 sur_metric.update([y_pred, y]) self.assertAlmostEqual(sur_metric.compute(), float(\"inf\")) def test_shape_mismatch(self): sur_metric", "(49, 49, 49), im_shape: Tuple[int, int, int] = (99, 99,", "Version 2.0 (the \"License\"); # you may not use this", "may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0", "TEST_SAMPLE_1 sur_metric.update([y_pred, y]) self.assertAlmostEqual(sur_metric.compute(), 4.17133, places=4) y_pred, y = TEST_SAMPLE_2", "-centre[1] : im_shape[1] - centre[1], -centre[2] : im_shape[2] - centre[2]", "[sampler_sphere_zeros, sampler_sphere_zeros] class TestHandlerSurfaceDistance(unittest.TestCase): # TODO test multi node Surface", "= torch.zeros_like(sampler_sphere) TEST_SAMPLE_1 = [sampler_sphere, sampler_sphere_gt] TEST_SAMPLE_2 = [sampler_sphere_gt, sampler_sphere_gt]", "= [sampler_sphere_zeros, sampler_sphere_zeros] class TestHandlerSurfaceDistance(unittest.TestCase): # TODO test multi node", "0 return image sampler_sphere = torch.Tensor(create_spherical_seg_3d(radius=20, centre=(20, 20, 20))).unsqueeze(0).unsqueeze(0) #", "implied. # See the License for the specific language governing", "under the Apache License, Version 2.0 (the \"License\"); # you", "with self.assertRaises((AssertionError, ValueError)): y_pred = TEST_SAMPLE_1[0] y = torch.ones((1, 1,", "= np.ogrid[ -centre[0] : im_shape[0] - centre[0], -centre[1] : im_shape[1]", "centre=(20, 20, 20))).unsqueeze(0).unsqueeze(0) # test input a list of channel-first", "by applicable law or agreed to in writing, software #", "3D image with a sphere inside. 
Voxel values will be", ": im_shape[0] - centre[0], -centre[1] : im_shape[1] - centre[1], -centre[2]", "class TestHandlerSurfaceDistance(unittest.TestCase): # TODO test multi node Surface Distance def", "return image sampler_sphere = torch.Tensor(create_spherical_seg_3d(radius=20, centre=(20, 20, 20))).unsqueeze(0).unsqueeze(0) # test", "y = TEST_SAMPLE_1 sur_metric.update([y_pred, y]) self.assertAlmostEqual(sur_metric.compute(), 4.17133, places=4) y_pred, y", "inside. Voxel values will be 1 inside the sphere, and", "the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable", "location of sphere centre. im_shape: shape of image to create", ": im_shape[2] - centre[2] ] circle = (spx * spx", "= SurfaceDistance(include_background=True) with self.assertRaises((AssertionError, ValueError)): y_pred = TEST_SAMPLE_1[0] y =", "test input a list of channel-first tensor sampler_sphere_gt = [torch.Tensor(create_spherical_seg_3d(radius=20,", "# http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed", "sampler_sphere_gt = [torch.Tensor(create_spherical_seg_3d(radius=20, centre=(10, 20, 20))).unsqueeze(0)] sampler_sphere_zeros = torch.zeros_like(sampler_sphere) TEST_SAMPLE_1", "* spz) <= radius * radius image[circle] = 1 image[~circle]", "float = 20.0, centre: Tuple[int, int, int] = (49, 49,", "-centre[0] : im_shape[0] - centre[0], -centre[1] : im_shape[1] - centre[1],", "im_shape[1] - centre[1], -centre[2] : im_shape[2] - centre[2] ] circle", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "Unless required by applicable law or agreed to in writing,", "multi node Surface Distance def test_compute(self): sur_metric = SurfaceDistance(include_background=True) def", "terms of number of voxels, can be partial) centre: location", "the specific language governing permissions and # limitations under the", "TEST_SAMPLE_1 = [sampler_sphere, sampler_sphere_gt] TEST_SAMPLE_2 = [sampler_sphere_gt, sampler_sphere_gt] TEST_SAMPLE_3 =", "= TEST_SAMPLE_1 sur_metric.update([y_pred, y]) self.assertAlmostEqual(sur_metric.compute(), 4.17133, places=4) y_pred, y =", "applicable law or agreed to in writing, software # distributed", "self.assertRaises((AssertionError, ValueError)): y_pred = TEST_SAMPLE_1[0] y = torch.ones((1, 1, 10,", "sur_metric = SurfaceDistance(include_background=True) def _val_func(engine, batch): pass engine = Engine(_val_func)", "centre[2] ] circle = (spx * spx + spy *", "y]) self.assertAlmostEqual(sur_metric.compute(), float(\"inf\")) def test_shape_mismatch(self): sur_metric = SurfaceDistance(include_background=True) with self.assertRaises((AssertionError,", "+ spy * spy + spz * spz) <= radius", "in writing, software # distributed under the License is distributed", "* spx + spy * spy + spz * spz)", "numpy as np import torch from ignite.engine import Engine from", "under the License. 
import unittest from typing import Tuple import", "sampler_sphere_gt] TEST_SAMPLE_4 = [sampler_sphere_zeros, sampler_sphere_zeros] class TestHandlerSurfaceDistance(unittest.TestCase): # TODO test", "sur_metric.update([y_pred, y]) self.assertAlmostEqual(sur_metric.compute(), 4.17133, places=4) y_pred, y = TEST_SAMPLE_2 sur_metric.update([y_pred,", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "License, Version 2.0 (the \"License\"); # you may not use", "# You may obtain a copy of the License at", "im_shape: shape of image to create See also: :py:meth:`~create_test_image_3d` \"\"\"", "20))).unsqueeze(0)] sampler_sphere_zeros = torch.zeros_like(sampler_sphere) TEST_SAMPLE_1 = [sampler_sphere, sampler_sphere_gt] TEST_SAMPLE_2 =", "TEST_SAMPLE_3 = [sampler_sphere_zeros, sampler_sphere_gt] TEST_SAMPLE_4 = [sampler_sphere_zeros, sampler_sphere_zeros] class TestHandlerSurfaceDistance(unittest.TestCase):", "list of channel-first tensor sampler_sphere_gt = [torch.Tensor(create_spherical_seg_3d(radius=20, centre=(10, 20, 20))).unsqueeze(0)]", "batch): pass engine = Engine(_val_func) sur_metric.attach(engine, \"surface_distance\") y_pred, y =", "the sphere, and 0 elsewhere. Args: radius: radius of sphere", "the License for the specific language governing permissions and #", "2021 MONAI Consortium # Licensed under the Apache License, Version", "y_pred, y = TEST_SAMPLE_2 sur_metric.update([y_pred, y]) self.assertAlmostEqual(sur_metric.compute(), 2.08566, places=4) y_pred,", "Apache License, Version 2.0 (the \"License\"); # you may not", "either express or implied. # See the License for the", "- 2021 MONAI Consortium # Licensed under the Apache License,", "spz) <= radius * radius image[circle] = 1 image[~circle] =", "centre=(10, 20, 20))).unsqueeze(0)] sampler_sphere_zeros = torch.zeros_like(sampler_sphere) TEST_SAMPLE_1 = [sampler_sphere, sampler_sphere_gt]", "obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 #", "from ignite.engine import Engine from monai.handlers import SurfaceDistance def create_spherical_seg_3d(", "4.17133, places=4) y_pred, y = TEST_SAMPLE_2 sur_metric.update([y_pred, y]) self.assertAlmostEqual(sur_metric.compute(), 2.08566,", "20, 20))).unsqueeze(0)] sampler_sphere_zeros = torch.zeros_like(sampler_sphere) TEST_SAMPLE_1 = [sampler_sphere, sampler_sphere_gt] TEST_SAMPLE_2", "limitations under the License. import unittest from typing import Tuple", "as np import torch from ignite.engine import Engine from monai.handlers", "monai.handlers import SurfaceDistance def create_spherical_seg_3d( radius: float = 20.0, centre:", "unittest from typing import Tuple import numpy as np import", "dtype=np.int32) spy, spx, spz = np.ogrid[ -centre[0] : im_shape[0] -", "TEST_SAMPLE_1[0] y = torch.ones((1, 1, 10, 10, 10)) sur_metric.update([y_pred, y])", "\"surface_distance\") y_pred, y = TEST_SAMPLE_1 sur_metric.update([y_pred, y]) self.assertAlmostEqual(sur_metric.compute(), 4.17133, places=4)", "can be partial) centre: location of sphere centre. im_shape: shape", "float(\"inf\")) y_pred, y = TEST_SAMPLE_4 sur_metric.update([y_pred, y]) self.assertAlmostEqual(sur_metric.compute(), float(\"inf\")) def", "\"License\"); # you may not use this file except in", "1 image[~circle] = 0 return image sampler_sphere = torch.Tensor(create_spherical_seg_3d(radius=20, centre=(20,", "permissions and # limitations under the License. 
import unittest from", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "= torch.Tensor(create_spherical_seg_3d(radius=20, centre=(20, 20, 20))).unsqueeze(0).unsqueeze(0) # test input a list", "# distributed under the License is distributed on an \"AS", "image[circle] = 1 image[~circle] = 0 return image sampler_sphere =", "# Unless required by applicable law or agreed to in", "y]) self.assertAlmostEqual(sur_metric.compute(), 2.08566, places=4) y_pred, y = TEST_SAMPLE_3 sur_metric.update([y_pred, y])", "pass engine = Engine(_val_func) sur_metric.attach(engine, \"surface_distance\") y_pred, y = TEST_SAMPLE_1", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "centre[0], -centre[1] : im_shape[1] - centre[1], -centre[2] : im_shape[2] -", "(spx * spx + spy * spy + spz *", "You may obtain a copy of the License at #", "= [sampler_sphere_zeros, sampler_sphere_gt] TEST_SAMPLE_4 = [sampler_sphere_zeros, sampler_sphere_zeros] class TestHandlerSurfaceDistance(unittest.TestCase): #", "image with a sphere inside. Voxel values will be 1", "torch.Tensor(create_spherical_seg_3d(radius=20, centre=(20, 20, 20))).unsqueeze(0).unsqueeze(0) # test input a list of", "tensor sampler_sphere_gt = [torch.Tensor(create_spherical_seg_3d(radius=20, centre=(10, 20, 20))).unsqueeze(0)] sampler_sphere_zeros = torch.zeros_like(sampler_sphere)", "= [torch.Tensor(create_spherical_seg_3d(radius=20, centre=(10, 20, 20))).unsqueeze(0)] sampler_sphere_zeros = torch.zeros_like(sampler_sphere) TEST_SAMPLE_1 =", "the Apache License, Version 2.0 (the \"License\"); # you may", "sur_metric.attach(engine, \"surface_distance\") y_pred, y = TEST_SAMPLE_1 sur_metric.update([y_pred, y]) self.assertAlmostEqual(sur_metric.compute(), 4.17133," ]
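# --- Illustrative sketch (added; not part of the test file above) ---
# A minimal sanity check built from the same pieces: when prediction and
# ground truth are the identical mask, the surface distance should be 0.
# It reuses create_spherical_seg_3d as defined above; the variable names
# `mask` and `metric` are illustrative only.
import torch
from monai.handlers import SurfaceDistance

mask = torch.Tensor(create_spherical_seg_3d(radius=20, centre=(20, 20, 20))).unsqueeze(0).unsqueeze(0)
metric = SurfaceDistance(include_background=True)
metric.update([mask, mask])
print(metric.compute())  # expected: 0.0 for identical masks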
[ "spec.get(\"novelty\", True) normal_classes = spec[\"normal_classes\"] precision = defaultdict(list) for rounds", "specification missing from the command line arguments\") exit(1) spec_file =", "= df.sample(n=max_rows, replace=False) label_col = spec[\"label_column\"] y = df[label_col].values other", "X_train = X_train[keep, :] y_train = y_train[keep] # training #flows,", "prec = ref[anomaly_indices].sum() / k logging.info(\"%s: %.1f%% (%d anomalies /", "logging.info(\"* SUMMARY %s\", spec_file) for name, prec in precision.items(): prec", "keep = np.where(np.isin(y_train, normal_classes))[0] X_train = X_train[keep, :] y_train =", "# imputing X = SimpleImputer(copy=False).fit_transform(X) # train/test split X_train, X_test,", "import SimpleImputer from anoflows.hpo import find_best_flows from data_loading import load_data", "{ \"anoflows\": flows.likelihood(X_test), \"iforest\": iforest.decision_function(X_test) } # evaluation y_true =", "min(len(df), spec.get(\"max_rows\", 40000)) novelty_detection = spec.get(\"novelty\", True) normal_classes = spec[\"normal_classes\"]", "# random sampling df = df.sample(n=max_rows, replace=False) label_col = spec[\"label_column\"]", "numpy as np from collections import defaultdict from sklearn.model_selection import", "from the command line arguments\") exit(1) spec_file = sys.argv[1] df,", "X_train, X_test, y_train, y_test = \\ train_test_split(X, y, shuffle=False, test_size=0.5)", "name, y_pred in pred.items(): anomaly_indices = y_pred.argsort()[:k] prec = ref[anomaly_indices].sum()", "find_best_flows from data_loading import load_data logging.getLogger().setLevel(logging.INFO) if len(sys.argv) == 1:", "import IsolationForest from sklearn.impute import SimpleImputer from anoflows.hpo import find_best_flows", "command line arguments\") exit(1) spec_file = sys.argv[1] df, spec =", "prediction pred = { \"anoflows\": flows.likelihood(X_test), \"iforest\": iforest.decision_function(X_test) } #", "(%d anomalies / %d rows)\" % (name, 100*prec, k, len(y_test)))", "np.mean(prec) std = np.std(prec) logging.info(\"%s; mean=%.1f%% std=%.1f%%\" % (name, mean,", "X = other.values # imputing X = SimpleImputer(copy=False).fit_transform(X) # train/test", "defaultdict from sklearn.model_selection import train_test_split from sklearn.ensemble import IsolationForest from", "AnoFlowBagging() flows.fit(X_train) iforest = IsolationForest().fit(X_train) # prediction pred = {", "True) normal_classes = spec[\"normal_classes\"] precision = defaultdict(list) for rounds in", "= ref[anomaly_indices].sum() / k logging.info(\"%s: %.1f%% (%d anomalies / %d", "= df[label_col].values other = df.drop(label_col, inplace=False, axis=1) X = other.values", "IsolationForest().fit(X_train) # prediction pred = { \"anoflows\": flows.likelihood(X_test), \"iforest\": iforest.decision_function(X_test)", "from data_loading import load_data logging.getLogger().setLevel(logging.INFO) if len(sys.argv) == 1: logging.error(\"YAML", "exit(1) spec_file = sys.argv[1] df, spec = load_data(spec_file) max_rows =", "import train_test_split from sklearn.ensemble import IsolationForest from sklearn.impute import SimpleImputer", "len(y_true) for name, y_pred in pred.items(): anomaly_indices = y_pred.argsort()[:k] prec", "#!/usr/bin/env python3 import sys import logging import yaml import pandas", "len(y_test))) precision[name].append(prec) logging.info(\"* SUMMARY %s\", spec_file) for name, prec in", "precision[name].append(prec) logging.info(\"* SUMMARY %s\", spec_file) for name, prec in precision.items():", 
"import load_data logging.getLogger().setLevel(logging.INFO) if len(sys.argv) == 1: logging.error(\"YAML data specification", "find_best_flows(X_train, device='cpu', n_trials=1) from anoflows.anoflow_bagging import AnoFlowBagging flows = AnoFlowBagging()", "evaluation y_true = np.where(np.isin(y_test, spec[\"anomaly_classes\"]))[0] ref = np.zeros(len(y_test)) ref[y_true] =", "pandas as pd import numpy as np from collections import", "y_true = np.where(np.isin(y_test, spec[\"anomaly_classes\"]))[0] ref = np.zeros(len(y_test)) ref[y_true] = 1", "yaml import pandas as pd import numpy as np from", "= 100 * np.array(prec) mean = np.mean(prec) std = np.std(prec)", "spec[\"label_column\"] y = df[label_col].values other = df.drop(label_col, inplace=False, axis=1) X", "# train/test split X_train, X_test, y_train, y_test = \\ train_test_split(X,", "import yaml import pandas as pd import numpy as np", "AnoFlowBagging flows = AnoFlowBagging() flows.fit(X_train) iforest = IsolationForest().fit(X_train) # prediction", "pd import numpy as np from collections import defaultdict from", "normal_classes))[0] X_train = X_train[keep, :] y_train = y_train[keep] # training", "pred.items(): anomaly_indices = y_pred.argsort()[:k] prec = ref[anomaly_indices].sum() / k logging.info(\"%s:", "novelty_detection = spec.get(\"novelty\", True) normal_classes = spec[\"normal_classes\"] precision = defaultdict(list)", "df.sample(n=max_rows, replace=False) label_col = spec[\"label_column\"] y = df[label_col].values other =", "label_col = spec[\"label_column\"] y = df[label_col].values other = df.drop(label_col, inplace=False,", "max_rows = min(len(df), spec.get(\"max_rows\", 40000)) novelty_detection = spec.get(\"novelty\", True) normal_classes", "= IsolationForest().fit(X_train) # prediction pred = { \"anoflows\": flows.likelihood(X_test), \"iforest\":", "collections import defaultdict from sklearn.model_selection import train_test_split from sklearn.ensemble import", "X_test, y_train, y_test = \\ train_test_split(X, y, shuffle=False, test_size=0.5) if", "sklearn.impute import SimpleImputer from anoflows.hpo import find_best_flows from data_loading import", "= AnoFlowBagging() flows.fit(X_train) iforest = IsolationForest().fit(X_train) # prediction pred =", "\"iforest\": iforest.decision_function(X_test) } # evaluation y_true = np.where(np.isin(y_test, spec[\"anomaly_classes\"]))[0] ref", "= 1 k = len(y_true) for name, y_pred in pred.items():", "y_test = \\ train_test_split(X, y, shuffle=False, test_size=0.5) if novelty_detection: keep", "np.zeros(len(y_test)) ref[y_true] = 1 k = len(y_true) for name, y_pred", "precision.items(): prec = 100 * np.array(prec) mean = np.mean(prec) std", "= np.mean(prec) std = np.std(prec) logging.info(\"%s; mean=%.1f%% std=%.1f%%\" % (name,", "import AnoFlowBagging flows = AnoFlowBagging() flows.fit(X_train) iforest = IsolationForest().fit(X_train) #", "normal_classes = spec[\"normal_classes\"] precision = defaultdict(list) for rounds in range(spec.get(\"rounds\",", "len(sys.argv) == 1: logging.error(\"YAML data specification missing from the command", "inplace=False, axis=1) X = other.values # imputing X = SimpleImputer(copy=False).fit_transform(X)", "spec.get(\"max_rows\", 40000)) novelty_detection = spec.get(\"novelty\", True) normal_classes = spec[\"normal_classes\"] precision", "if novelty_detection: keep = np.where(np.isin(y_train, normal_classes))[0] X_train = X_train[keep, :]", "rows)\" % (name, 100*prec, k, len(y_test))) precision[name].append(prec) logging.info(\"* SUMMARY %s\",", "# 
training #flows, loss = find_best_flows(X_train, device='cpu', n_trials=1) from anoflows.anoflow_bagging", "SUMMARY %s\", spec_file) for name, prec in precision.items(): prec =", "df = df.sample(n=max_rows, replace=False) label_col = spec[\"label_column\"] y = df[label_col].values", "= SimpleImputer(copy=False).fit_transform(X) # train/test split X_train, X_test, y_train, y_test =", "python3 import sys import logging import yaml import pandas as", "y = df[label_col].values other = df.drop(label_col, inplace=False, axis=1) X =", "loss = find_best_flows(X_train, device='cpu', n_trials=1) from anoflows.anoflow_bagging import AnoFlowBagging flows", "split X_train, X_test, y_train, y_test = \\ train_test_split(X, y, shuffle=False,", "from anoflows.hpo import find_best_flows from data_loading import load_data logging.getLogger().setLevel(logging.INFO) if", "import find_best_flows from data_loading import load_data logging.getLogger().setLevel(logging.INFO) if len(sys.argv) ==", "test_size=0.5) if novelty_detection: keep = np.where(np.isin(y_train, normal_classes))[0] X_train = X_train[keep,", "ref[anomaly_indices].sum() / k logging.info(\"%s: %.1f%% (%d anomalies / %d rows)\"", "= y_train[keep] # training #flows, loss = find_best_flows(X_train, device='cpu', n_trials=1)", "the command line arguments\") exit(1) spec_file = sys.argv[1] df, spec", "from collections import defaultdict from sklearn.model_selection import train_test_split from sklearn.ensemble", "40000)) novelty_detection = spec.get(\"novelty\", True) normal_classes = spec[\"normal_classes\"] precision =", "X = SimpleImputer(copy=False).fit_transform(X) # train/test split X_train, X_test, y_train, y_test", "from anoflows.anoflow_bagging import AnoFlowBagging flows = AnoFlowBagging() flows.fit(X_train) iforest =", "/ k logging.info(\"%s: %.1f%% (%d anomalies / %d rows)\" %", "logging.info(\"%s: %.1f%% (%d anomalies / %d rows)\" % (name, 100*prec,", "* np.array(prec) mean = np.mean(prec) std = np.std(prec) logging.info(\"%s; mean=%.1f%%", "from sklearn.impute import SimpleImputer from anoflows.hpo import find_best_flows from data_loading", "= find_best_flows(X_train, device='cpu', n_trials=1) from anoflows.anoflow_bagging import AnoFlowBagging flows =", "df[label_col].values other = df.drop(label_col, inplace=False, axis=1) X = other.values #", "\\ train_test_split(X, y, shuffle=False, test_size=0.5) if novelty_detection: keep = np.where(np.isin(y_train,", "= other.values # imputing X = SimpleImputer(copy=False).fit_transform(X) # train/test split", "anomalies / %d rows)\" % (name, 100*prec, k, len(y_test))) precision[name].append(prec)", "= spec[\"label_column\"] y = df[label_col].values other = df.drop(label_col, inplace=False, axis=1)", "%d rows)\" % (name, 100*prec, k, len(y_test))) precision[name].append(prec) logging.info(\"* SUMMARY", "spec[\"normal_classes\"] precision = defaultdict(list) for rounds in range(spec.get(\"rounds\", 1)): #", "} # evaluation y_true = np.where(np.isin(y_test, spec[\"anomaly_classes\"]))[0] ref = np.zeros(len(y_test))", "= sys.argv[1] df, spec = load_data(spec_file) max_rows = min(len(df), spec.get(\"max_rows\",", "y_train[keep] # training #flows, loss = find_best_flows(X_train, device='cpu', n_trials=1) from", "device='cpu', n_trials=1) from anoflows.anoflow_bagging import AnoFlowBagging flows = AnoFlowBagging() flows.fit(X_train)", "flows = AnoFlowBagging() flows.fit(X_train) iforest = IsolationForest().fit(X_train) # prediction pred", "in range(spec.get(\"rounds\", 1)): # random sampling df = 
df.sample(n=max_rows, replace=False)", "novelty_detection: keep = np.where(np.isin(y_train, normal_classes))[0] X_train = X_train[keep, :] y_train", "name, prec in precision.items(): prec = 100 * np.array(prec) mean", "= np.where(np.isin(y_train, normal_classes))[0] X_train = X_train[keep, :] y_train = y_train[keep]", "data specification missing from the command line arguments\") exit(1) spec_file", "= min(len(df), spec.get(\"max_rows\", 40000)) novelty_detection = spec.get(\"novelty\", True) normal_classes =", "other.values # imputing X = SimpleImputer(copy=False).fit_transform(X) # train/test split X_train,", "SimpleImputer(copy=False).fit_transform(X) # train/test split X_train, X_test, y_train, y_test = \\", "= defaultdict(list) for rounds in range(spec.get(\"rounds\", 1)): # random sampling", "sys.argv[1] df, spec = load_data(spec_file) max_rows = min(len(df), spec.get(\"max_rows\", 40000))", "= X_train[keep, :] y_train = y_train[keep] # training #flows, loss", "= { \"anoflows\": flows.likelihood(X_test), \"iforest\": iforest.decision_function(X_test) } # evaluation y_true", "logging.getLogger().setLevel(logging.INFO) if len(sys.argv) == 1: logging.error(\"YAML data specification missing from", "logging.error(\"YAML data specification missing from the command line arguments\") exit(1)", "np from collections import defaultdict from sklearn.model_selection import train_test_split from", "sklearn.model_selection import train_test_split from sklearn.ensemble import IsolationForest from sklearn.impute import", "IsolationForest from sklearn.impute import SimpleImputer from anoflows.hpo import find_best_flows from", "ref[y_true] = 1 k = len(y_true) for name, y_pred in", "100*prec, k, len(y_test))) precision[name].append(prec) logging.info(\"* SUMMARY %s\", spec_file) for name,", "rounds in range(spec.get(\"rounds\", 1)): # random sampling df = df.sample(n=max_rows,", "arguments\") exit(1) spec_file = sys.argv[1] df, spec = load_data(spec_file) max_rows", "100 * np.array(prec) mean = np.mean(prec) std = np.std(prec) logging.info(\"%s;", "spec = load_data(spec_file) max_rows = min(len(df), spec.get(\"max_rows\", 40000)) novelty_detection =", "n_trials=1) from anoflows.anoflow_bagging import AnoFlowBagging flows = AnoFlowBagging() flows.fit(X_train) iforest", "= np.where(np.isin(y_test, spec[\"anomaly_classes\"]))[0] ref = np.zeros(len(y_test)) ref[y_true] = 1 k", "load_data logging.getLogger().setLevel(logging.INFO) if len(sys.argv) == 1: logging.error(\"YAML data specification missing", "sampling df = df.sample(n=max_rows, replace=False) label_col = spec[\"label_column\"] y =", "in precision.items(): prec = 100 * np.array(prec) mean = np.mean(prec)", "anoflows.anoflow_bagging import AnoFlowBagging flows = AnoFlowBagging() flows.fit(X_train) iforest = IsolationForest().fit(X_train)", "pred = { \"anoflows\": flows.likelihood(X_test), \"iforest\": iforest.decision_function(X_test) } # evaluation", "= spec[\"normal_classes\"] precision = defaultdict(list) for rounds in range(spec.get(\"rounds\", 1)):", "%s\", spec_file) for name, prec in precision.items(): prec = 100", "np.array(prec) mean = np.mean(prec) std = np.std(prec) logging.info(\"%s; mean=%.1f%% std=%.1f%%\"", "sklearn.ensemble import IsolationForest from sklearn.impute import SimpleImputer from anoflows.hpo import", "1: logging.error(\"YAML data specification missing from the command line arguments\")", "k logging.info(\"%s: %.1f%% (%d anomalies / %d rows)\" % (name,", "std = np.std(prec) logging.info(\"%s; mean=%.1f%% std=%.1f%%\" % 
(name, mean, std))", "if len(sys.argv) == 1: logging.error(\"YAML data specification missing from the", "load_data(spec_file) max_rows = min(len(df), spec.get(\"max_rows\", 40000)) novelty_detection = spec.get(\"novelty\", True)", "/ %d rows)\" % (name, 100*prec, k, len(y_test))) precision[name].append(prec) logging.info(\"*", "= load_data(spec_file) max_rows = min(len(df), spec.get(\"max_rows\", 40000)) novelty_detection = spec.get(\"novelty\",", "flows.fit(X_train) iforest = IsolationForest().fit(X_train) # prediction pred = { \"anoflows\":", "k = len(y_true) for name, y_pred in pred.items(): anomaly_indices =", "train_test_split from sklearn.ensemble import IsolationForest from sklearn.impute import SimpleImputer from", "1)): # random sampling df = df.sample(n=max_rows, replace=False) label_col =", "mean = np.mean(prec) std = np.std(prec) logging.info(\"%s; mean=%.1f%% std=%.1f%%\" %", "X_train[keep, :] y_train = y_train[keep] # training #flows, loss =", "from sklearn.model_selection import train_test_split from sklearn.ensemble import IsolationForest from sklearn.impute", "df, spec = load_data(spec_file) max_rows = min(len(df), spec.get(\"max_rows\", 40000)) novelty_detection", "imputing X = SimpleImputer(copy=False).fit_transform(X) # train/test split X_train, X_test, y_train,", "k, len(y_test))) precision[name].append(prec) logging.info(\"* SUMMARY %s\", spec_file) for name, prec", "sys import logging import yaml import pandas as pd import", "import pandas as pd import numpy as np from collections", "from sklearn.ensemble import IsolationForest from sklearn.impute import SimpleImputer from anoflows.hpo", "axis=1) X = other.values # imputing X = SimpleImputer(copy=False).fit_transform(X) #", "y_train = y_train[keep] # training #flows, loss = find_best_flows(X_train, device='cpu',", "import defaultdict from sklearn.model_selection import train_test_split from sklearn.ensemble import IsolationForest", "import logging import yaml import pandas as pd import numpy", "shuffle=False, test_size=0.5) if novelty_detection: keep = np.where(np.isin(y_train, normal_classes))[0] X_train =", "1 k = len(y_true) for name, y_pred in pred.items(): anomaly_indices", "train/test split X_train, X_test, y_train, y_test = \\ train_test_split(X, y,", "import numpy as np from collections import defaultdict from sklearn.model_selection", "= \\ train_test_split(X, y, shuffle=False, test_size=0.5) if novelty_detection: keep =", "random sampling df = df.sample(n=max_rows, replace=False) label_col = spec[\"label_column\"] y", "for name, y_pred in pred.items(): anomaly_indices = y_pred.argsort()[:k] prec =", "# prediction pred = { \"anoflows\": flows.likelihood(X_test), \"iforest\": iforest.decision_function(X_test) }", "SimpleImputer from anoflows.hpo import find_best_flows from data_loading import load_data logging.getLogger().setLevel(logging.INFO)", "% (name, 100*prec, k, len(y_test))) precision[name].append(prec) logging.info(\"* SUMMARY %s\", spec_file)", "spec_file = sys.argv[1] df, spec = load_data(spec_file) max_rows = min(len(df),", "df.drop(label_col, inplace=False, axis=1) X = other.values # imputing X =", "= np.zeros(len(y_test)) ref[y_true] = 1 k = len(y_true) for name,", ":] y_train = y_train[keep] # training #flows, loss = find_best_flows(X_train,", "\"anoflows\": flows.likelihood(X_test), \"iforest\": iforest.decision_function(X_test) } # evaluation y_true = np.where(np.isin(y_test,", "as np from collections import defaultdict from sklearn.model_selection import train_test_split", "missing from the 
command line arguments\") exit(1) spec_file = sys.argv[1]", "prec = 100 * np.array(prec) mean = np.mean(prec) std =", "(name, 100*prec, k, len(y_test))) precision[name].append(prec) logging.info(\"* SUMMARY %s\", spec_file) for", "np.where(np.isin(y_train, normal_classes))[0] X_train = X_train[keep, :] y_train = y_train[keep] #", "anomaly_indices = y_pred.argsort()[:k] prec = ref[anomaly_indices].sum() / k logging.info(\"%s: %.1f%%", "training #flows, loss = find_best_flows(X_train, device='cpu', n_trials=1) from anoflows.anoflow_bagging import", "= y_pred.argsort()[:k] prec = ref[anomaly_indices].sum() / k logging.info(\"%s: %.1f%% (%d", "range(spec.get(\"rounds\", 1)): # random sampling df = df.sample(n=max_rows, replace=False) label_col", "spec[\"anomaly_classes\"]))[0] ref = np.zeros(len(y_test)) ref[y_true] = 1 k = len(y_true)", "prec in precision.items(): prec = 100 * np.array(prec) mean =", "= spec.get(\"novelty\", True) normal_classes = spec[\"normal_classes\"] precision = defaultdict(list) for", "logging import yaml import pandas as pd import numpy as", "#flows, loss = find_best_flows(X_train, device='cpu', n_trials=1) from anoflows.anoflow_bagging import AnoFlowBagging", "np.where(np.isin(y_test, spec[\"anomaly_classes\"]))[0] ref = np.zeros(len(y_test)) ref[y_true] = 1 k =", "as pd import numpy as np from collections import defaultdict", "== 1: logging.error(\"YAML data specification missing from the command line", "y_pred.argsort()[:k] prec = ref[anomaly_indices].sum() / k logging.info(\"%s: %.1f%% (%d anomalies", "in pred.items(): anomaly_indices = y_pred.argsort()[:k] prec = ref[anomaly_indices].sum() / k", "train_test_split(X, y, shuffle=False, test_size=0.5) if novelty_detection: keep = np.where(np.isin(y_train, normal_classes))[0]", "iforest.decision_function(X_test) } # evaluation y_true = np.where(np.isin(y_test, spec[\"anomaly_classes\"]))[0] ref =", "replace=False) label_col = spec[\"label_column\"] y = df[label_col].values other = df.drop(label_col,", "iforest = IsolationForest().fit(X_train) # prediction pred = { \"anoflows\": flows.likelihood(X_test),", "precision = defaultdict(list) for rounds in range(spec.get(\"rounds\", 1)): # random", "y, shuffle=False, test_size=0.5) if novelty_detection: keep = np.where(np.isin(y_train, normal_classes))[0] X_train", "flows.likelihood(X_test), \"iforest\": iforest.decision_function(X_test) } # evaluation y_true = np.where(np.isin(y_test, spec[\"anomaly_classes\"]))[0]", "spec_file) for name, prec in precision.items(): prec = 100 *", "= len(y_true) for name, y_pred in pred.items(): anomaly_indices = y_pred.argsort()[:k]", "%.1f%% (%d anomalies / %d rows)\" % (name, 100*prec, k,", "defaultdict(list) for rounds in range(spec.get(\"rounds\", 1)): # random sampling df", "anoflows.hpo import find_best_flows from data_loading import load_data logging.getLogger().setLevel(logging.INFO) if len(sys.argv)", "# evaluation y_true = np.where(np.isin(y_test, spec[\"anomaly_classes\"]))[0] ref = np.zeros(len(y_test)) ref[y_true]", "y_train, y_test = \\ train_test_split(X, y, shuffle=False, test_size=0.5) if novelty_detection:", "import sys import logging import yaml import pandas as pd", "y_pred in pred.items(): anomaly_indices = y_pred.argsort()[:k] prec = ref[anomaly_indices].sum() /", "data_loading import load_data logging.getLogger().setLevel(logging.INFO) if len(sys.argv) == 1: logging.error(\"YAML data", "= df.drop(label_col, inplace=False, axis=1) X = other.values # imputing X", "other = df.drop(label_col, inplace=False, axis=1) X = 
other.values # imputing", "ref = np.zeros(len(y_test)) ref[y_true] = 1 k = len(y_true) for", "for name, prec in precision.items(): prec = 100 * np.array(prec)", "for rounds in range(spec.get(\"rounds\", 1)): # random sampling df =", "line arguments\") exit(1) spec_file = sys.argv[1] df, spec = load_data(spec_file)" ]
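# --- Illustrative sketch (added; not part of the script above) ---
# The YAML specification consumed above is only documented implicitly, through
# the keys the script reads from `spec`. Once parsed, an equivalent spec would
# be a mapping like the one below; the values are placeholders, and any keys
# used only inside data_loading.load_data (e.g. where the raw data lives) are
# not visible above and are therefore omitted.
example_spec = {
    "label_column": "class",        # column of df holding the class labels
    "normal_classes": ["normal"],   # rows kept for training when novelty is true
    "anomaly_classes": ["attack"],  # labels counted as anomalies at evaluation
    "max_rows": 40000,              # optional, default 40000
    "novelty": True,                # optional, default True
    "rounds": 1,                    # optional, default 1
}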
[ "'install path': Path(__file__).resolve().parent, 'python version': sys.version, 'platform': platform.platform(), 'optional deps.", "platform.platform(), 'optional deps. installed': optional_deps, } return '\\n'.join('{:>30} {}'.format(k +", "= '1.4a1' def version_info() -> str: import platform import sys", "'\\n'.join('{:>30} {}'.format(k + ':', str(v).replace('\\n', ' ')) for k, v", "continue optional_deps.append(p) info = { 'pydantic version': VERSION, 'pydantic compiled':", "__all__ = ['VERSION', 'version_info'] VERSION = '1.4a1' def version_info() ->", "'pydantic version': VERSION, 'pydantic compiled': compiled, 'install path': Path(__file__).resolve().parent, 'python", "'devtools'): try: import_module(p.replace('-', '_')) except ImportError: continue optional_deps.append(p) info =", "version_info() -> str: import platform import sys from importlib import", "['VERSION', 'version_info'] VERSION = '1.4a1' def version_info() -> str: import", "from importlib import import_module from pathlib import Path from .main", "optional_deps.append(p) info = { 'pydantic version': VERSION, 'pydantic compiled': compiled,", "compiled': compiled, 'install path': Path(__file__).resolve().parent, 'python version': sys.version, 'platform': platform.platform(),", "'python version': sys.version, 'platform': platform.platform(), 'optional deps. installed': optional_deps, }", "ImportError: continue optional_deps.append(p) info = { 'pydantic version': VERSION, 'pydantic", "import import_module from pathlib import Path from .main import compiled", "pathlib import Path from .main import compiled optional_deps = []", "Path from .main import compiled optional_deps = [] for p", "p in ('typing-extensions', 'email-validator', 'devtools'): try: import_module(p.replace('-', '_')) except ImportError:", "'optional deps. installed': optional_deps, } return '\\n'.join('{:>30} {}'.format(k + ':',", "str: import platform import sys from importlib import import_module from", "[] for p in ('typing-extensions', 'email-validator', 'devtools'): try: import_module(p.replace('-', '_'))", "} return '\\n'.join('{:>30} {}'.format(k + ':', str(v).replace('\\n', ' ')) for", "Path(__file__).resolve().parent, 'python version': sys.version, 'platform': platform.platform(), 'optional deps. installed': optional_deps,", "optional_deps = [] for p in ('typing-extensions', 'email-validator', 'devtools'): try:", "info = { 'pydantic version': VERSION, 'pydantic compiled': compiled, 'install", "import sys from importlib import import_module from pathlib import Path", "compiled optional_deps = [] for p in ('typing-extensions', 'email-validator', 'devtools'):", "from pathlib import Path from .main import compiled optional_deps =", ".main import compiled optional_deps = [] for p in ('typing-extensions',", "in ('typing-extensions', 'email-validator', 'devtools'): try: import_module(p.replace('-', '_')) except ImportError: continue", "except ImportError: continue optional_deps.append(p) info = { 'pydantic version': VERSION,", "compiled, 'install path': Path(__file__).resolve().parent, 'python version': sys.version, 'platform': platform.platform(), 'optional", "'platform': platform.platform(), 'optional deps. installed': optional_deps, } return '\\n'.join('{:>30} {}'.format(k", "deps. 
installed': optional_deps, } return '\\n'.join('{:>30} {}'.format(k + ':', str(v).replace('\\n',", "installed': optional_deps, } return '\\n'.join('{:>30} {}'.format(k + ':', str(v).replace('\\n', '", "importlib import import_module from pathlib import Path from .main import", "-> str: import platform import sys from importlib import import_module", "'version_info'] VERSION = '1.4a1' def version_info() -> str: import platform", "'1.4a1' def version_info() -> str: import platform import sys from", "import compiled optional_deps = [] for p in ('typing-extensions', 'email-validator',", "version': VERSION, 'pydantic compiled': compiled, 'install path': Path(__file__).resolve().parent, 'python version':", "version': sys.version, 'platform': platform.platform(), 'optional deps. installed': optional_deps, } return", "import_module from pathlib import Path from .main import compiled optional_deps", "for p in ('typing-extensions', 'email-validator', 'devtools'): try: import_module(p.replace('-', '_')) except", "try: import_module(p.replace('-', '_')) except ImportError: continue optional_deps.append(p) info = {", "= [] for p in ('typing-extensions', 'email-validator', 'devtools'): try: import_module(p.replace('-',", "from .main import compiled optional_deps = [] for p in", "{}'.format(k + ':', str(v).replace('\\n', ' ')) for k, v in", "VERSION = '1.4a1' def version_info() -> str: import platform import", "import Path from .main import compiled optional_deps = [] for", "'pydantic compiled': compiled, 'install path': Path(__file__).resolve().parent, 'python version': sys.version, 'platform':", "VERSION, 'pydantic compiled': compiled, 'install path': Path(__file__).resolve().parent, 'python version': sys.version,", "<reponame>jamescurtin/pydantic __all__ = ['VERSION', 'version_info'] VERSION = '1.4a1' def version_info()", "sys.version, 'platform': platform.platform(), 'optional deps. installed': optional_deps, } return '\\n'.join('{:>30}", "+ ':', str(v).replace('\\n', ' ')) for k, v in info.items())", "('typing-extensions', 'email-validator', 'devtools'): try: import_module(p.replace('-', '_')) except ImportError: continue optional_deps.append(p)", "path': Path(__file__).resolve().parent, 'python version': sys.version, 'platform': platform.platform(), 'optional deps. installed':", "return '\\n'.join('{:>30} {}'.format(k + ':', str(v).replace('\\n', ' ')) for k,", "'email-validator', 'devtools'): try: import_module(p.replace('-', '_')) except ImportError: continue optional_deps.append(p) info", "import platform import sys from importlib import import_module from pathlib", "platform import sys from importlib import import_module from pathlib import", "import_module(p.replace('-', '_')) except ImportError: continue optional_deps.append(p) info = { 'pydantic", "'_')) except ImportError: continue optional_deps.append(p) info = { 'pydantic version':", "{ 'pydantic version': VERSION, 'pydantic compiled': compiled, 'install path': Path(__file__).resolve().parent,", "optional_deps, } return '\\n'.join('{:>30} {}'.format(k + ':', str(v).replace('\\n', ' '))", "= { 'pydantic version': VERSION, 'pydantic compiled': compiled, 'install path':", "= ['VERSION', 'version_info'] VERSION = '1.4a1' def version_info() -> str:", "sys from importlib import import_module from pathlib import Path from", "def version_info() -> str: import platform import sys from importlib" ]
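# --- Illustrative usage (added; not part of the module above) ---
# Assuming the module above is installed as pydantic's `version` module, the
# summary it builds can be printed directly; each output line is one
# `key: value` pair from the `info` dict, right-aligned to 30 characters.
from pydantic import version

print(version.version_info())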
[ "subject.configuration: token = '/'.join(tokens) if dependency: structure = dependency.construct_schema(name=token) if", "while queue: subject, tokens, dependency = queue.pop(0) if subject.configuration: token", "= {} schemas = {} units = {} @classmethod def", "def register_unit(cls, unit): cls.units[unit.identity] = unit if cls.is_configurable(unit): queue =", "= '/'.join(tokens) if dependency: structure = dependency.construct_schema(name=token) if dependency.token and", "else: schema = dependency.construct_schema(generic=True, name=token) if dependency.optional: schema = schema.clone(required=False)", "in obj.__bases__) @classmethod def purge(cls): cls.schemas = {} cls.units =", "structure = structure.clone(required=False) else: structure = subject.configuration.schema.clone(required=False, name=token) cls.schemas[token] =", "= structure for attr, subdependency in subject.dependencies.iteritems(): queue.append((subdependency.unit, tokens +", "obj): return (obj is not Configurable and issubclass(obj, Configurable) and", "= unit if cls.is_configurable(unit): queue = [(unit, [unit.identity], None)] while", "'/'.join(tokens) if dependency: structure = dependency.construct_schema(name=token) if dependency.token and structure.required:", "structure = dependency.construct_schema(name=token) if dependency.token and structure.required: structure = structure.clone(required=False)", "structure = subject.configuration.schema.clone(required=False, name=token) cls.schemas[token] = structure for attr, subdependency", "unit if cls.is_configurable(unit): queue = [(unit, [unit.identity], None)] while queue:", "unit): cls.units[unit.identity] = unit if cls.is_configurable(unit): queue = [(unit, [unit.identity],", "token = '/'.join(tokens) if dependency: structure = dependency.construct_schema(name=token) if dependency.token", "issubclass(obj, Configurable) and Configurable not in obj.__bases__) @classmethod def purge(cls):", "subject, tokens, dependency = queue.pop(0) if subject.configuration: token = '/'.join(tokens)", "in cls.schemas: structure = cls.schemas[token] if configuration.required and not dependency.optional", "= schema.clone(required=False) cls.schemas[token] = schema @classmethod def register_unit(cls, unit): cls.units[unit.identity]", "[(unit, [unit.identity], None)] while queue: subject, tokens, dependency = queue.pop(0)", "if configuration.required and not dependency.optional and not structure.required: structure.required =", "from scheme import Structure __all__ = ('Configurable', 'Registry') class Configurable(object):", "not in obj.__bases__) @classmethod def purge(cls): cls.schemas = {} cls.units", "structure.clone(required=False) else: structure = subject.configuration.schema.clone(required=False, name=token) cls.schemas[token] = structure for", "type(dependency) if not dependency.configurable: return configuration = dependency.unit.configuration if token", "a configuration chain.\"\"\" class Registry(object): \"\"\"The unit registry.\"\"\" dependencies =", "structure.required = True else: schema = dependency.construct_schema(generic=True, name=token) if dependency.optional:", "dependency.construct_schema(name=token) if dependency.token and structure.required: structure = structure.clone(required=False) else: structure", "purge(cls): cls.schemas = {} cls.units = {} @classmethod def register_dependency(cls,", "schema.clone(required=False) cls.schemas[token] = schema @classmethod def register_unit(cls, unit): cls.units[unit.identity] =", "= {} units = {} @classmethod def 
is_configurable(cls, obj): return", "dependency.optional: schema = schema.clone(required=False) cls.schemas[token] = schema @classmethod def register_unit(cls,", "queue.pop(0) if subject.configuration: token = '/'.join(tokens) if dependency: structure =", "for attr, subdependency in subject.dependencies.iteritems(): queue.append((subdependency.unit, tokens + [attr], subdependency))", "cls.units = {} @classmethod def register_dependency(cls, dependency): token = dependency.token", "@classmethod def purge(cls): cls.schemas = {} cls.units = {} @classmethod", "cls.schemas: structure = cls.schemas[token] if configuration.required and not dependency.optional and", "if not dependency.configurable: return configuration = dependency.unit.configuration if token in", "and not dependency.optional and not structure.required: structure.required = True else:", "subclasses can establish a configuration chain.\"\"\" class Registry(object): \"\"\"The unit", "= type(dependency) if not dependency.configurable: return configuration = dependency.unit.configuration if", "tokens, dependency = queue.pop(0) if subject.configuration: token = '/'.join(tokens) if", "structure.required: structure = structure.clone(required=False) else: structure = subject.configuration.schema.clone(required=False, name=token) cls.schemas[token]", "= structure.clone(required=False) else: structure = subject.configuration.schema.clone(required=False, name=token) cls.schemas[token] = structure", "cls.dependencies: cls.dependencies[token] = type(dependency) if not dependency.configurable: return configuration =", "<filename>spire/core/registry.py from scheme import Structure __all__ = ('Configurable', 'Registry') class", "Configurable) and Configurable not in obj.__bases__) @classmethod def purge(cls): cls.schemas", "and Configurable not in obj.__bases__) @classmethod def purge(cls): cls.schemas =", "if cls.is_configurable(unit): queue = [(unit, [unit.identity], None)] while queue: subject,", "dependency.token and structure.required: structure = structure.clone(required=False) else: structure = subject.configuration.schema.clone(required=False,", "cls.schemas = {} cls.units = {} @classmethod def register_dependency(cls, dependency):", "return if token not in cls.dependencies: cls.dependencies[token] = type(dependency) if", "= dependency.token if not token: return if token not in", "= {} @classmethod def register_dependency(cls, dependency): token = dependency.token if", "__all__ = ('Configurable', 'Registry') class Configurable(object): \"\"\"A sentry class which", "configuration.required and not dependency.optional and not structure.required: structure.required = True", "cls.schemas[token] = schema @classmethod def register_unit(cls, unit): cls.units[unit.identity] = unit", "{} @classmethod def register_dependency(cls, dependency): token = dependency.token if not", "dependency.construct_schema(generic=True, name=token) if dependency.optional: schema = schema.clone(required=False) cls.schemas[token] = schema", "sentry class which indicates that subclasses can establish a configuration", "None)] while queue: subject, tokens, dependency = queue.pop(0) if subject.configuration:", "if dependency: structure = dependency.construct_schema(name=token) if dependency.token and structure.required: structure", "is not Configurable and issubclass(obj, Configurable) and Configurable not in", "= dependency.construct_schema(name=token) if dependency.token and structure.required: structure = structure.clone(required=False) else:", "class which indicates 
that subclasses can establish a configuration chain.\"\"\"", "not in cls.dependencies: cls.dependencies[token] = type(dependency) if not dependency.configurable: return", "indicates that subclasses can establish a configuration chain.\"\"\" class Registry(object):", "Structure __all__ = ('Configurable', 'Registry') class Configurable(object): \"\"\"A sentry class", "if token not in cls.dependencies: cls.dependencies[token] = type(dependency) if not", "in cls.dependencies: cls.dependencies[token] = type(dependency) if not dependency.configurable: return configuration", "structure = cls.schemas[token] if configuration.required and not dependency.optional and not", "schema = dependency.construct_schema(generic=True, name=token) if dependency.optional: schema = schema.clone(required=False) cls.schemas[token]", "token: return if token not in cls.dependencies: cls.dependencies[token] = type(dependency)", "if subject.configuration: token = '/'.join(tokens) if dependency: structure = dependency.construct_schema(name=token)", "if not token: return if token not in cls.dependencies: cls.dependencies[token]", "(obj is not Configurable and issubclass(obj, Configurable) and Configurable not", "register_dependency(cls, dependency): token = dependency.token if not token: return if", "\"\"\"A sentry class which indicates that subclasses can establish a", "not dependency.configurable: return configuration = dependency.unit.configuration if token in cls.schemas:", "structure for attr, subdependency in subject.dependencies.iteritems(): queue.append((subdependency.unit, tokens + [attr],", "not Configurable and issubclass(obj, Configurable) and Configurable not in obj.__bases__)", "= queue.pop(0) if subject.configuration: token = '/'.join(tokens) if dependency: structure", "dependency: structure = dependency.construct_schema(name=token) if dependency.token and structure.required: structure =", "configuration = dependency.unit.configuration if token in cls.schemas: structure = cls.schemas[token]", "'Registry') class Configurable(object): \"\"\"A sentry class which indicates that subclasses", "token = dependency.token if not token: return if token not", "{} cls.units = {} @classmethod def register_dependency(cls, dependency): token =", "return (obj is not Configurable and issubclass(obj, Configurable) and Configurable", "Registry(object): \"\"\"The unit registry.\"\"\" dependencies = {} schemas = {}", "= subject.configuration.schema.clone(required=False, name=token) cls.schemas[token] = structure for attr, subdependency in", "and structure.required: structure = structure.clone(required=False) else: structure = subject.configuration.schema.clone(required=False, name=token)", "can establish a configuration chain.\"\"\" class Registry(object): \"\"\"The unit registry.\"\"\"", "cls.schemas[token] = structure for attr, subdependency in subject.dependencies.iteritems(): queue.append((subdependency.unit, tokens", "unit registry.\"\"\" dependencies = {} schemas = {} units =", "not token: return if token not in cls.dependencies: cls.dependencies[token] =", "cls.dependencies[token] = type(dependency) if not dependency.configurable: return configuration = dependency.unit.configuration", "import Structure __all__ = ('Configurable', 'Registry') class Configurable(object): \"\"\"A sentry", "name=token) if dependency.optional: schema = schema.clone(required=False) cls.schemas[token] = schema @classmethod", "else: structure = subject.configuration.schema.clone(required=False, name=token) cls.schemas[token] = structure for 
attr,", "('Configurable', 'Registry') class Configurable(object): \"\"\"A sentry class which indicates that", "{} units = {} @classmethod def is_configurable(cls, obj): return (obj", "class Configurable(object): \"\"\"A sentry class which indicates that subclasses can", "configuration chain.\"\"\" class Registry(object): \"\"\"The unit registry.\"\"\" dependencies = {}", "@classmethod def is_configurable(cls, obj): return (obj is not Configurable and", "queue = [(unit, [unit.identity], None)] while queue: subject, tokens, dependency", "cls.is_configurable(unit): queue = [(unit, [unit.identity], None)] while queue: subject, tokens,", "schema = schema.clone(required=False) cls.schemas[token] = schema @classmethod def register_unit(cls, unit):", "queue: subject, tokens, dependency = queue.pop(0) if subject.configuration: token =", "class Registry(object): \"\"\"The unit registry.\"\"\" dependencies = {} schemas =", "= cls.schemas[token] if configuration.required and not dependency.optional and not structure.required:", "Configurable not in obj.__bases__) @classmethod def purge(cls): cls.schemas = {}", "establish a configuration chain.\"\"\" class Registry(object): \"\"\"The unit registry.\"\"\" dependencies", "dependency.unit.configuration if token in cls.schemas: structure = cls.schemas[token] if configuration.required", "True else: schema = dependency.construct_schema(generic=True, name=token) if dependency.optional: schema =", "@classmethod def register_unit(cls, unit): cls.units[unit.identity] = unit if cls.is_configurable(unit): queue", "name=token) cls.schemas[token] = structure for attr, subdependency in subject.dependencies.iteritems(): queue.append((subdependency.unit,", "{} @classmethod def is_configurable(cls, obj): return (obj is not Configurable", "= {} @classmethod def is_configurable(cls, obj): return (obj is not", "def register_dependency(cls, dependency): token = dependency.token if not token: return", "{} schemas = {} units = {} @classmethod def is_configurable(cls,", "dependency.optional and not structure.required: structure.required = True else: schema =", "not dependency.optional and not structure.required: structure.required = True else: schema", "chain.\"\"\" class Registry(object): \"\"\"The unit registry.\"\"\" dependencies = {} schemas", "def is_configurable(cls, obj): return (obj is not Configurable and issubclass(obj,", "if token in cls.schemas: structure = cls.schemas[token] if configuration.required and", "if dependency.token and structure.required: structure = structure.clone(required=False) else: structure =", "register_unit(cls, unit): cls.units[unit.identity] = unit if cls.is_configurable(unit): queue = [(unit,", "= schema @classmethod def register_unit(cls, unit): cls.units[unit.identity] = unit if", "= ('Configurable', 'Registry') class Configurable(object): \"\"\"A sentry class which indicates", "registry.\"\"\" dependencies = {} schemas = {} units = {}", "is_configurable(cls, obj): return (obj is not Configurable and issubclass(obj, Configurable)", "and issubclass(obj, Configurable) and Configurable not in obj.__bases__) @classmethod def", "return configuration = dependency.unit.configuration if token in cls.schemas: structure =", "scheme import Structure __all__ = ('Configurable', 'Registry') class Configurable(object): \"\"\"A", "\"\"\"The unit registry.\"\"\" dependencies = {} schemas = {} units", "schemas = {} units = {} @classmethod def is_configurable(cls, obj):", "def purge(cls): cls.schemas = {} cls.units = {} @classmethod def", "token not in 
cls.dependencies: cls.dependencies[token] = type(dependency) if not dependency.configurable:", "dependency.token if not token: return if token not in cls.dependencies:", "not structure.required: structure.required = True else: schema = dependency.construct_schema(generic=True, name=token)", "@classmethod def register_dependency(cls, dependency): token = dependency.token if not token:", "token in cls.schemas: structure = cls.schemas[token] if configuration.required and not", "dependency): token = dependency.token if not token: return if token", "= dependency.unit.configuration if token in cls.schemas: structure = cls.schemas[token] if", "subject.configuration.schema.clone(required=False, name=token) cls.schemas[token] = structure for attr, subdependency in subject.dependencies.iteritems():", "obj.__bases__) @classmethod def purge(cls): cls.schemas = {} cls.units = {}", "= {} cls.units = {} @classmethod def register_dependency(cls, dependency): token", "Configurable(object): \"\"\"A sentry class which indicates that subclasses can establish", "and not structure.required: structure.required = True else: schema = dependency.construct_schema(generic=True,", "structure.required: structure.required = True else: schema = dependency.construct_schema(generic=True, name=token) if", "which indicates that subclasses can establish a configuration chain.\"\"\" class", "if dependency.optional: schema = schema.clone(required=False) cls.schemas[token] = schema @classmethod def", "schema @classmethod def register_unit(cls, unit): cls.units[unit.identity] = unit if cls.is_configurable(unit):", "cls.units[unit.identity] = unit if cls.is_configurable(unit): queue = [(unit, [unit.identity], None)]", "dependencies = {} schemas = {} units = {} @classmethod", "units = {} @classmethod def is_configurable(cls, obj): return (obj is", "[unit.identity], None)] while queue: subject, tokens, dependency = queue.pop(0) if", "= [(unit, [unit.identity], None)] while queue: subject, tokens, dependency =", "cls.schemas[token] if configuration.required and not dependency.optional and not structure.required: structure.required", "that subclasses can establish a configuration chain.\"\"\" class Registry(object): \"\"\"The", "= True else: schema = dependency.construct_schema(generic=True, name=token) if dependency.optional: schema", "Configurable and issubclass(obj, Configurable) and Configurable not in obj.__bases__) @classmethod", "dependency.configurable: return configuration = dependency.unit.configuration if token in cls.schemas: structure", "dependency = queue.pop(0) if subject.configuration: token = '/'.join(tokens) if dependency:", "= dependency.construct_schema(generic=True, name=token) if dependency.optional: schema = schema.clone(required=False) cls.schemas[token] =" ]
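A minimal, self-contained sketch of how the is_configurable predicate above distinguishes the sentry class, its direct subclasses, and deeper subclasses. The Unit and ConcreteUnit names here are hypothetical stand-ins for illustration, not spire classes.

# Hypothetical illustration: is_configurable skips the sentry and its direct subclasses.
class Configurable(object):
    """Stand-in for the sentry class above."""

class Unit(Configurable):
    """Direct subclass: Configurable is in __bases__, so it is not considered configurable."""

class ConcreteUnit(Unit):
    """Indirect subclass: this one is considered configurable."""

def is_configurable(obj):
    # Same predicate as Registry.is_configurable above.
    return (obj is not Configurable and issubclass(obj, Configurable)
            and Configurable not in obj.__bases__)

if __name__ == '__main__':
    print(is_configurable(Configurable))   # False: the sentry itself
    print(is_configurable(Unit))           # False: direct subclass
    print(is_configurable(ConcreteUnit))   # True: further down the configuration chain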
[ ".messaging import * from .method import * from .operation import", "import * from .database import * from .entrypoint import *", "* from .method import * from .operation import * from", "from .messaging import * from .method import * from .operation", "import * from .operation import * from .stack import *", "import * from .group import * from .http import *", "* from .http import * from .messaging import * from", "from .method import * from .operation import * from .stack", ".command import * from .database import * from .entrypoint import", "from .http import * from .messaging import * from .method", "* from .database import * from .entrypoint import * from", "* from .messaging import * from .method import * from", "import * from .entrypoint import * from .group import *", ".database import * from .entrypoint import * from .group import", ".http import * from .messaging import * from .method import", ".method import * from .operation import * from .stack import", "* from .entrypoint import * from .group import * from", ".operation import * from .stack import * from .threads import", "<filename>oslo_devsupport/model/__init__.py from .command import * from .database import * from", "* from .operation import * from .stack import * from", "import * from .stack import * from .threads import *", "from .database import * from .entrypoint import * from .group", "from .entrypoint import * from .group import * from .http", "* from .group import * from .http import * from", ".entrypoint import * from .group import * from .http import", "import * from .messaging import * from .method import *", "import * from .http import * from .messaging import *", "from .command import * from .database import * from .entrypoint", ".group import * from .http import * from .messaging import", "from .operation import * from .stack import * from .threads", "from .group import * from .http import * from .messaging", "import * from .method import * from .operation import *" ]
[ "= random.choice(range(len(tokens))) choice = \" \".join( tokens[phrase_index : min(len(tokens), phrase_index", "target = f\"<s> {target}\" if args.add_eos: target = f\"{target} </s>\"", "\"__main__\": parser = argparse.ArgumentParser() parser.add_argument(\"--number\", \"-n\", type=int, default=1, help=\"number of", "target = line.split(\"\\t\") if args.add_sos: target = f\"<s> {target}\" if", "args.add_sos: target = f\"<s> {target}\" if args.add_eos: target = f\"{target}", "and its affiliates. # # This source code is licensed", "= line.split(\"\\t\") if args.add_sos: target = f\"<s> {target}\" if args.add_eos:", "in the # LICENSE file in the root directory of", "directory of this source tree. \"\"\"Extracts random constraints from reference", "tree. \"\"\"Extracts random constraints from reference files.\"\"\" import argparse import", "for line in sys.stdin: constraints = [] def add_constraint(constraint): constraints.append(constraint)", "under the MIT license found in the # LICENSE file", "in the root directory of this source tree. \"\"\"Extracts random", "for key in sorted(choices.keys()): add_constraint(choices[key]) print(source, *constraints, sep=\"\\t\") if __name__", "index + length): words.pop(index) return phr def main(args): if args.seed:", "of phrases\") parser.add_argument(\"--len\", \"-l\", type=int, default=1, help=\"phrase length\") parser.add_argument( \"--add-sos\",", "if phrase_index > 0: words.append(\" \".join(tokens[0:phrase_index])) if phrase_index + 1", "= \" \".join( tokens[phrase_index : min(len(tokens), phrase_index + args.len)] )", "if len(words) == 0: break segmentno = random.choice(range(len(words))) segment =", "length): words.pop(index) return phr def main(args): if args.seed: random.seed(args.seed) for", "Copyright (c) Facebook, Inc. and its affiliates. # # This", "with spaces target = target.replace(choice, \" \" * len(choice), 1)", "= argparse.ArgumentParser() parser.add_argument(\"--number\", \"-n\", type=int, default=1, help=\"number of phrases\") parser.add_argument(\"--len\",", "LICENSE file in the root directory of this source tree.", "+ length]) for i in range(index, index + length): words.pop(index)", "help=\"number of phrases\") parser.add_argument(\"--len\", \"-l\", type=int, default=1, help=\"phrase length\") parser.add_argument(", "# # Copyright (c) Facebook, Inc. and its affiliates. #", "\" * len(choice), 1) for key in sorted(choices.keys()): add_constraint(choices[key]) print(source,", "its affiliates. 
# # This source code is licensed under", "): tokens.pop(phrase_index) if phrase_index > 0: words.append(\" \".join(tokens[0:phrase_index])) if phrase_index", "args.seed: random.seed(args.seed) for line in sys.stdin: constraints = [] def", ") parser.add_argument( \"--add-eos\", default=False, action=\"store_true\", help=\"add </s> token\" ) parser.add_argument(\"--seed\",", "f\"{target} </s>\" if len(target.split()) >= args.len: words = [target] num", "words.append(\" \".join(tokens[phrase_index:])) choices[target.find(choice)] = choice # mask out with spaces", "length\") parser.add_argument( \"--add-sos\", default=False, action=\"store_true\", help=\"add <s> token\" ) parser.add_argument(", "in range(num): if len(words) == 0: break segmentno = random.choice(range(len(words)))", "min(len(tokens), phrase_index + args.len)] ) for j in range( phrase_index,", "files.\"\"\" import argparse import random import sys from sacrebleu import", "0: break segmentno = random.choice(range(len(words))) segment = words.pop(segmentno) tokens =", "type=int, default=1, help=\"number of phrases\") parser.add_argument(\"--len\", \"-l\", type=int, default=1, help=\"phrase", "\" \".join( tokens[phrase_index : min(len(tokens), phrase_index + args.len)] ) for", "0: words.append(\" \".join(tokens[0:phrase_index])) if phrase_index + 1 < len(tokens): words.append(\"", "target.replace(choice, \" \" * len(choice), 1) for key in sorted(choices.keys()):", "spaces target = target.replace(choice, \" \" * len(choice), 1) for", "- length + 1 phr = \" \".join(words[index : index", "+ 1 phr = \" \".join(words[index : index + length])", "num = args.number choices = {} for i in range(num):", "tokens.pop(phrase_index) if phrase_index > 0: words.append(\" \".join(tokens[0:phrase_index])) if phrase_index +", "length]) for i in range(index, index + length): words.pop(index) return", "\" \" * len(choice), 1) for key in sorted(choices.keys()): add_constraint(choices[key])", "# LICENSE file in the root directory of this source", "if \"\\t\" in line: source, target = line.split(\"\\t\") if args.add_sos:", "the MIT license found in the # LICENSE file in", "for i in range(index, index + length): words.pop(index) return phr", "random.seed(args.seed) for line in sys.stdin: constraints = [] def add_constraint(constraint):", "{target}\" if args.add_eos: target = f\"{target} </s>\" if len(target.split()) >=", "choices = {} for i in range(num): if len(words) ==", "[target] num = args.number choices = {} for i in", "= {} for i in range(num): if len(words) == 0:", "found in the # LICENSE file in the root directory", "+ args.len)] ) for j in range( phrase_index, min(len(tokens), phrase_index", "phrase_index + args.len)] ) for j in range( phrase_index, min(len(tokens),", "segmentno = random.choice(range(len(words))) segment = words.pop(segmentno) tokens = segment.split() phrase_index", "key in sorted(choices.keys()): add_constraint(choices[key]) print(source, *constraints, sep=\"\\t\") if __name__ ==", "parser.add_argument( \"--add-eos\", default=False, action=\"store_true\", help=\"add </s> token\" ) parser.add_argument(\"--seed\", \"-s\",", "import random import sys from sacrebleu import extract_ngrams def get_phrase(words,", "len(tokens): words.append(\" \".join(tokens[phrase_index:])) choices[target.find(choice)] = choice # mask out with", "len(words) - length + 1 phr = \" \".join(words[index :", "= f\"<s> {target}\" if args.add_eos: target = f\"{target} </s>\" if", "license found in the # LICENSE file in the root", "if args.add_eos: target = 
f\"{target} </s>\" if len(target.split()) >= args.len:", "\".join(tokens[phrase_index:])) choices[target.find(choice)] = choice # mask out with spaces target", "min(len(tokens), phrase_index + args.len) ): tokens.pop(phrase_index) if phrase_index > 0:", "line in sys.stdin: constraints = [] def add_constraint(constraint): constraints.append(constraint) source", "tokens = segment.split() phrase_index = random.choice(range(len(tokens))) choice = \" \".join(", "default=False, action=\"store_true\", help=\"add </s> token\" ) parser.add_argument(\"--seed\", \"-s\", default=0, type=int)", "{} for i in range(num): if len(words) == 0: break", "phrase_index, min(len(tokens), phrase_index + args.len) ): tokens.pop(phrase_index) if phrase_index >", "argparse import random import sys from sacrebleu import extract_ngrams def", "def get_phrase(words, index, length): assert index < len(words) - length", "phrase_index > 0: words.append(\" \".join(tokens[0:phrase_index])) if phrase_index + 1 <", "phr = \" \".join(words[index : index + length]) for i", "default=False, action=\"store_true\", help=\"add <s> token\" ) parser.add_argument( \"--add-eos\", default=False, action=\"store_true\",", "in range(index, index + length): words.pop(index) return phr def main(args):", "\"\\t\" in line: source, target = line.split(\"\\t\") if args.add_sos: target", "target = f\"{target} </s>\" if len(target.split()) >= args.len: words =", "range(index, index + length): words.pop(index) return phr def main(args): if", "token\" ) parser.add_argument( \"--add-eos\", default=False, action=\"store_true\", help=\"add </s> token\" )", "get_phrase(words, index, length): assert index < len(words) - length +", "in range( phrase_index, min(len(tokens), phrase_index + args.len) ): tokens.pop(phrase_index) if", "help=\"phrase length\") parser.add_argument( \"--add-sos\", default=False, action=\"store_true\", help=\"add <s> token\" )", "phrase_index + args.len) ): tokens.pop(phrase_index) if phrase_index > 0: words.append(\"", "< len(tokens): words.append(\" \".join(tokens[phrase_index:])) choices[target.find(choice)] = choice # mask out", "1 < len(tokens): words.append(\" \".join(tokens[phrase_index:])) choices[target.find(choice)] = choice # mask", "index, length): assert index < len(words) - length + 1", "\"-l\", type=int, default=1, help=\"phrase length\") parser.add_argument( \"--add-sos\", default=False, action=\"store_true\", help=\"add", "main(args): if args.seed: random.seed(args.seed) for line in sys.stdin: constraints =", "in sys.stdin: constraints = [] def add_constraint(constraint): constraints.append(constraint) source =", "mask out with spaces target = target.replace(choice, \" \" *", "= choice # mask out with spaces target = target.replace(choice,", "parser.add_argument(\"--number\", \"-n\", type=int, default=1, help=\"number of phrases\") parser.add_argument(\"--len\", \"-l\", type=int,", "of this source tree. \"\"\"Extracts random constraints from reference files.\"\"\"", "sys from sacrebleu import extract_ngrams def get_phrase(words, index, length): assert", ">= args.len: words = [target] num = args.number choices =", "extract_ngrams def get_phrase(words, index, length): assert index < len(words) -", "import sys from sacrebleu import extract_ngrams def get_phrase(words, index, length):", "sacrebleu import extract_ngrams def get_phrase(words, index, length): assert index <", "</s>\" if len(target.split()) >= args.len: words = [target] num =", "source tree. 
\"\"\"Extracts random constraints from reference files.\"\"\" import argparse", "= target.replace(choice, \" \" * len(choice), 1) for key in", "line.rstrip() if \"\\t\" in line: source, target = line.split(\"\\t\") if", "import extract_ngrams def get_phrase(words, index, length): assert index < len(words)", "source, target = line.split(\"\\t\") if args.add_sos: target = f\"<s> {target}\"", "random.choice(range(len(words))) segment = words.pop(segmentno) tokens = segment.split() phrase_index = random.choice(range(len(tokens)))", "(c) Facebook, Inc. and its affiliates. # # This source", "1) for key in sorted(choices.keys()): add_constraint(choices[key]) print(source, *constraints, sep=\"\\t\") if", "parser.add_argument(\"--len\", \"-l\", type=int, default=1, help=\"phrase length\") parser.add_argument( \"--add-sos\", default=False, action=\"store_true\",", "> 0: words.append(\" \".join(tokens[0:phrase_index])) if phrase_index + 1 < len(tokens):", "i in range(num): if len(words) == 0: break segmentno =", "line.split(\"\\t\") if args.add_sos: target = f\"<s> {target}\" if args.add_eos: target", "break segmentno = random.choice(range(len(words))) segment = words.pop(segmentno) tokens = segment.split()", "is licensed under the MIT license found in the #", "constraints from reference files.\"\"\" import argparse import random import sys", "reference files.\"\"\" import argparse import random import sys from sacrebleu", "constraints.append(constraint) source = line.rstrip() if \"\\t\" in line: source, target", "* len(choice), 1) for key in sorted(choices.keys()): add_constraint(choices[key]) print(source, *constraints,", "file in the root directory of this source tree. \"\"\"Extracts", "sys.stdin: constraints = [] def add_constraint(constraint): constraints.append(constraint) source = line.rstrip()", "== 0: break segmentno = random.choice(range(len(words))) segment = words.pop(segmentno) tokens", "+ 1 < len(tokens): words.append(\" \".join(tokens[phrase_index:])) choices[target.find(choice)] = choice #", "sep=\"\\t\") if __name__ == \"__main__\": parser = argparse.ArgumentParser() parser.add_argument(\"--number\", \"-n\",", "the # LICENSE file in the root directory of this", "# mask out with spaces target = target.replace(choice, \" \"", "index < len(words) - length + 1 phr = \"", "== \"__main__\": parser = argparse.ArgumentParser() parser.add_argument(\"--number\", \"-n\", type=int, default=1, help=\"number", "\"--add-eos\", default=False, action=\"store_true\", help=\"add </s> token\" ) parser.add_argument(\"--seed\", \"-s\", default=0,", "</s> token\" ) parser.add_argument(\"--seed\", \"-s\", default=0, type=int) args = parser.parse_args()", "add_constraint(constraint): constraints.append(constraint) source = line.rstrip() if \"\\t\" in line: source,", "random import sys from sacrebleu import extract_ngrams def get_phrase(words, index,", "len(target.split()) >= args.len: words = [target] num = args.number choices", "for j in range( phrase_index, min(len(tokens), phrase_index + args.len) ):", "phr def main(args): if args.seed: random.seed(args.seed) for line in sys.stdin:", "*constraints, sep=\"\\t\") if __name__ == \"__main__\": parser = argparse.ArgumentParser() parser.add_argument(\"--number\",", "import argparse import random import sys from sacrebleu import extract_ngrams", "line: source, target = line.split(\"\\t\") if args.add_sos: target = f\"<s>", "__name__ == \"__main__\": parser = argparse.ArgumentParser() parser.add_argument(\"--number\", \"-n\", type=int, default=1,", ") for j in 
range( phrase_index, min(len(tokens), phrase_index + args.len)", "args.add_eos: target = f\"{target} </s>\" if len(target.split()) >= args.len: words", "= [] def add_constraint(constraint): constraints.append(constraint) source = line.rstrip() if \"\\t\"", "# Copyright (c) Facebook, Inc. and its affiliates. # #", "\" \".join(words[index : index + length]) for i in range(index,", "sorted(choices.keys()): add_constraint(choices[key]) print(source, *constraints, sep=\"\\t\") if __name__ == \"__main__\": parser", "i in range(index, index + length): words.pop(index) return phr def", "choice = \" \".join( tokens[phrase_index : min(len(tokens), phrase_index + args.len)]", "constraints = [] def add_constraint(constraint): constraints.append(constraint) source = line.rstrip() if", "length + 1 phr = \" \".join(words[index : index +", "1 phr = \" \".join(words[index : index + length]) for", "if args.add_sos: target = f\"<s> {target}\" if args.add_eos: target =", "token\" ) parser.add_argument(\"--seed\", \"-s\", default=0, type=int) args = parser.parse_args() Main(args)", "def main(args): if args.seed: random.seed(args.seed) for line in sys.stdin: constraints", "This source code is licensed under the MIT license found", "\"--add-sos\", default=False, action=\"store_true\", help=\"add <s> token\" ) parser.add_argument( \"--add-eos\", default=False,", "in line: source, target = line.split(\"\\t\") if args.add_sos: target =", "if len(target.split()) >= args.len: words = [target] num = args.number", "args.len: words = [target] num = args.number choices = {}", "= args.number choices = {} for i in range(num): if", "if phrase_index + 1 < len(tokens): words.append(\" \".join(tokens[phrase_index:])) choices[target.find(choice)] =", "+ length): words.pop(index) return phr def main(args): if args.seed: random.seed(args.seed)", "assert index < len(words) - length + 1 phr =", ": index + length]) for i in range(index, index +", "range( phrase_index, min(len(tokens), phrase_index + args.len) ): tokens.pop(phrase_index) if phrase_index", "args.len) ): tokens.pop(phrase_index) if phrase_index > 0: words.append(\" \".join(tokens[0:phrase_index])) if", "\".join(words[index : index + length]) for i in range(index, index", "parser = argparse.ArgumentParser() parser.add_argument(\"--number\", \"-n\", type=int, default=1, help=\"number of phrases\")", "words.append(\" \".join(tokens[0:phrase_index])) if phrase_index + 1 < len(tokens): words.append(\" \".join(tokens[phrase_index:]))", "target = target.replace(choice, \" \" * len(choice), 1) for key", "code is licensed under the MIT license found in the", "if __name__ == \"__main__\": parser = argparse.ArgumentParser() parser.add_argument(\"--number\", \"-n\", type=int,", "python3 # # Copyright (c) Facebook, Inc. and its affiliates.", "out with spaces target = target.replace(choice, \" \" * len(choice),", "#!/usr/bin/env python3 # # Copyright (c) Facebook, Inc. and its", "the root directory of this source tree. 
\"\"\"Extracts random constraints", "from sacrebleu import extract_ngrams def get_phrase(words, index, length): assert index", "action=\"store_true\", help=\"add <s> token\" ) parser.add_argument( \"--add-eos\", default=False, action=\"store_true\", help=\"add", "words.pop(segmentno) tokens = segment.split() phrase_index = random.choice(range(len(tokens))) choice = \"", "= \" \".join(words[index : index + length]) for i in", "source code is licensed under the MIT license found in", "random.choice(range(len(tokens))) choice = \" \".join( tokens[phrase_index : min(len(tokens), phrase_index +", "Facebook, Inc. and its affiliates. # # This source code", "licensed under the MIT license found in the # LICENSE", "range(num): if len(words) == 0: break segmentno = random.choice(range(len(words))) segment", "[] def add_constraint(constraint): constraints.append(constraint) source = line.rstrip() if \"\\t\" in", "\".join( tokens[phrase_index : min(len(tokens), phrase_index + args.len)] ) for j", "type=int, default=1, help=\"phrase length\") parser.add_argument( \"--add-sos\", default=False, action=\"store_true\", help=\"add <s>", "from reference files.\"\"\" import argparse import random import sys from", "args.number choices = {} for i in range(num): if len(words)", "phrase_index + 1 < len(tokens): words.append(\" \".join(tokens[phrase_index:])) choices[target.find(choice)] = choice", "# # This source code is licensed under the MIT", "words = [target] num = args.number choices = {} for", "choices[target.find(choice)] = choice # mask out with spaces target =", "segment = words.pop(segmentno) tokens = segment.split() phrase_index = random.choice(range(len(tokens))) choice", "length): assert index < len(words) - length + 1 phr", "= words.pop(segmentno) tokens = segment.split() phrase_index = random.choice(range(len(tokens))) choice =", "MIT license found in the # LICENSE file in the", "affiliates. # # This source code is licensed under the", "Inc. and its affiliates. # # This source code is", "words.pop(index) return phr def main(args): if args.seed: random.seed(args.seed) for line", "for i in range(num): if len(words) == 0: break segmentno", "= segment.split() phrase_index = random.choice(range(len(tokens))) choice = \" \".join( tokens[phrase_index", "segment.split() phrase_index = random.choice(range(len(tokens))) choice = \" \".join( tokens[phrase_index :", "args.len)] ) for j in range( phrase_index, min(len(tokens), phrase_index +", "root directory of this source tree. 
\"\"\"Extracts random constraints from", "= f\"{target} </s>\" if len(target.split()) >= args.len: words = [target]", "len(words) == 0: break segmentno = random.choice(range(len(words))) segment = words.pop(segmentno)", "len(choice), 1) for key in sorted(choices.keys()): add_constraint(choices[key]) print(source, *constraints, sep=\"\\t\")", "argparse.ArgumentParser() parser.add_argument(\"--number\", \"-n\", type=int, default=1, help=\"number of phrases\") parser.add_argument(\"--len\", \"-l\",", "help=\"add <s> token\" ) parser.add_argument( \"--add-eos\", default=False, action=\"store_true\", help=\"add </s>", "\"-n\", type=int, default=1, help=\"number of phrases\") parser.add_argument(\"--len\", \"-l\", type=int, default=1,", "source = line.rstrip() if \"\\t\" in line: source, target =", "return phr def main(args): if args.seed: random.seed(args.seed) for line in", "# This source code is licensed under the MIT license", "< len(words) - length + 1 phr = \" \".join(words[index", "tokens[phrase_index : min(len(tokens), phrase_index + args.len)] ) for j in", "help=\"add </s> token\" ) parser.add_argument(\"--seed\", \"-s\", default=0, type=int) args =", "j in range( phrase_index, min(len(tokens), phrase_index + args.len) ): tokens.pop(phrase_index)", "= line.rstrip() if \"\\t\" in line: source, target = line.split(\"\\t\")", "if args.seed: random.seed(args.seed) for line in sys.stdin: constraints = []", "\"\"\"Extracts random constraints from reference files.\"\"\" import argparse import random", "choice # mask out with spaces target = target.replace(choice, \"", "index + length]) for i in range(index, index + length):", ": min(len(tokens), phrase_index + args.len)] ) for j in range(", "print(source, *constraints, sep=\"\\t\") if __name__ == \"__main__\": parser = argparse.ArgumentParser()", "action=\"store_true\", help=\"add </s> token\" ) parser.add_argument(\"--seed\", \"-s\", default=0, type=int) args", "random constraints from reference files.\"\"\" import argparse import random import", "def add_constraint(constraint): constraints.append(constraint) source = line.rstrip() if \"\\t\" in line:", "add_constraint(choices[key]) print(source, *constraints, sep=\"\\t\") if __name__ == \"__main__\": parser =", "= [target] num = args.number choices = {} for i", "phrase_index = random.choice(range(len(tokens))) choice = \" \".join( tokens[phrase_index : min(len(tokens),", "default=1, help=\"phrase length\") parser.add_argument( \"--add-sos\", default=False, action=\"store_true\", help=\"add <s> token\"", "\".join(tokens[0:phrase_index])) if phrase_index + 1 < len(tokens): words.append(\" \".join(tokens[phrase_index:])) choices[target.find(choice)]", "<s> token\" ) parser.add_argument( \"--add-eos\", default=False, action=\"store_true\", help=\"add </s> token\"", "default=1, help=\"number of phrases\") parser.add_argument(\"--len\", \"-l\", type=int, default=1, help=\"phrase length\")", "f\"<s> {target}\" if args.add_eos: target = f\"{target} </s>\" if len(target.split())", "= random.choice(range(len(words))) segment = words.pop(segmentno) tokens = segment.split() phrase_index =", "+ args.len) ): tokens.pop(phrase_index) if phrase_index > 0: words.append(\" \".join(tokens[0:phrase_index]))", "phrases\") parser.add_argument(\"--len\", \"-l\", type=int, default=1, help=\"phrase length\") parser.add_argument( \"--add-sos\", default=False,", "parser.add_argument( \"--add-sos\", default=False, action=\"store_true\", help=\"add <s> token\" ) parser.add_argument( \"--add-eos\",", "this source tree. 
\"\"\"Extracts random constraints from reference files.\"\"\" import", "in sorted(choices.keys()): add_constraint(choices[key]) print(source, *constraints, sep=\"\\t\") if __name__ == \"__main__\":" ]
[ "<filename>AppImageBuilder/commands/file.py # Copyright 2020 <NAME> # # Permission is hereby", "conditions: # # The above copyright notice and this permission", "0: raise FileError('\\n'.join(self.stderr)) return '\\n'.join(self.stdout) def is_executable_elf(self, path): output =", "permit persons to whom the Software is # furnished to", "and associated documentation files (the \"Software\"), # to deal in", "the Software, and to permit persons to whom the Software", "from .command import Command class FileError(RuntimeError): pass class File(Command): def", "the Software is # furnished to do so, subject to", "super().__init__('file') self.log_stdout = False self.log_command = False def query(self, path):", "above copyright notice and this permission notice shall be included", "this permission notice shall be included in # all copies", "substantial portions of the Software. import os from .command import", "(the \"Software\"), # to deal in the Software without restriction,", "# rights to use, copy, modify, merge, publish, distribute, sublicense,", "in # all copies or substantial portions of the Software.", "persons to whom the Software is # furnished to do", "class FileError(RuntimeError): pass class File(Command): def __init__(self): super().__init__('file') self.log_stdout =", "without restriction, including without limitation the # rights to use,", "def query(self, path): self._run(['file', '-b', '--exclude', 'ascii', path]) if self.return_code", "free of charge, to any person obtaining a # copy", "copy, modify, merge, publish, distribute, sublicense, and/or # sell copies", "files (the \"Software\"), # to deal in the Software without", "permission notice shall be included in # all copies or", "'\\n'.join(self.stdout) def is_executable_elf(self, path): output = self.query(path) result = ('ELF'", "charge, to any person obtaining a # copy of this", "the Software without restriction, including without limitation the # rights", "= False def query(self, path): self._run(['file', '-b', '--exclude', 'ascii', path])", "return '\\n'.join(self.stdout) def is_executable_elf(self, path): output = self.query(path) result =", "if self.return_code != 0: raise FileError('\\n'.join(self.stderr)) return '\\n'.join(self.stdout) def is_executable_elf(self,", "and/or # sell copies of the Software, and to permit", "path]) if self.return_code != 0: raise FileError('\\n'.join(self.stderr)) return '\\n'.join(self.stdout) def", "a # copy of this software and associated documentation files", "be included in # all copies or substantial portions of", "of the Software, and to permit persons to whom the", "person obtaining a # copy of this software and associated", "any person obtaining a # copy of this software and", "is hereby granted, free of charge, to any person obtaining", "# # The above copyright notice and this permission notice", "path): output = self.query(path) result = ('ELF' in output) and", "# sell copies of the Software, and to permit persons", "associated documentation files (the \"Software\"), # to deal in the", "os from .command import Command class FileError(RuntimeError): pass class File(Command):", "FileError(RuntimeError): pass class File(Command): def __init__(self): super().__init__('file') self.log_stdout = False", "= False self.log_command = False def query(self, path): self._run(['file', '-b',", "Software is # furnished to do so, subject to the", "__init__(self): super().__init__('file') self.log_stdout = False self.log_command = False def query(self,", "self.log_stdout = False 
self.log_command = False def query(self, path): self._run(['file',", "and to permit persons to whom the Software is #", "= self.query(path) result = ('ELF' in output) and ('executable' in", "FileError('\\n'.join(self.stderr)) return '\\n'.join(self.stdout) def is_executable_elf(self, path): output = self.query(path) result", "copies of the Software, and to permit persons to whom", "hereby granted, free of charge, to any person obtaining a", "import os from .command import Command class FileError(RuntimeError): pass class", "to deal in the Software without restriction, including without limitation", "whom the Software is # furnished to do so, subject", "def __init__(self): super().__init__('file') self.log_stdout = False self.log_command = False def", "'--exclude', 'ascii', path]) if self.return_code != 0: raise FileError('\\n'.join(self.stderr)) return", "is_executable_elf(self, path): output = self.query(path) result = ('ELF' in output)", "# # Permission is hereby granted, free of charge, to", "to the following conditions: # # The above copyright notice", "subject to the following conditions: # # The above copyright", "of the Software. import os from .command import Command class", "do so, subject to the following conditions: # # The", "2020 <NAME> # # Permission is hereby granted, free of", "sublicense, and/or # sell copies of the Software, and to", "\"Software\"), # to deal in the Software without restriction, including", "query(self, path): self._run(['file', '-b', '--exclude', 'ascii', path]) if self.return_code !=", "in the Software without restriction, including without limitation the #", "to use, copy, modify, merge, publish, distribute, sublicense, and/or #", "included in # all copies or substantial portions of the", "def is_executable_elf(self, path): output = self.query(path) result = ('ELF' in", "# furnished to do so, subject to the following conditions:", "distribute, sublicense, and/or # sell copies of the Software, and", "import Command class FileError(RuntimeError): pass class File(Command): def __init__(self): super().__init__('file')", "!= 0: raise FileError('\\n'.join(self.stderr)) return '\\n'.join(self.stdout) def is_executable_elf(self, path): output", "shall be included in # all copies or substantial portions", "False self.log_command = False def query(self, path): self._run(['file', '-b', '--exclude',", "Software, and to permit persons to whom the Software is", "self.log_command = False def query(self, path): self._run(['file', '-b', '--exclude', 'ascii',", "of this software and associated documentation files (the \"Software\"), #", "documentation files (the \"Software\"), # to deal in the Software", "rights to use, copy, modify, merge, publish, distribute, sublicense, and/or", "copy of this software and associated documentation files (the \"Software\"),", "notice and this permission notice shall be included in #", "Command class FileError(RuntimeError): pass class File(Command): def __init__(self): super().__init__('file') self.log_stdout", "restriction, including without limitation the # rights to use, copy,", "granted, free of charge, to any person obtaining a #", "result = ('ELF' in output) and ('executable' in output) return", "self.query(path) result = ('ELF' in output) and ('executable' in output)", "copyright notice and this permission notice shall be included in", "all copies or substantial portions of the Software. 
import os", "Software without restriction, including without limitation the # rights to", "# Permission is hereby granted, free of charge, to any", "copies or substantial portions of the Software. import os from", "# Copyright 2020 <NAME> # # Permission is hereby granted,", "pass class File(Command): def __init__(self): super().__init__('file') self.log_stdout = False self.log_command", "<NAME> # # Permission is hereby granted, free of charge,", "File(Command): def __init__(self): super().__init__('file') self.log_stdout = False self.log_command = False", "the Software. import os from .command import Command class FileError(RuntimeError):", "False def query(self, path): self._run(['file', '-b', '--exclude', 'ascii', path]) if", "sell copies of the Software, and to permit persons to", "or substantial portions of the Software. import os from .command", "use, copy, modify, merge, publish, distribute, sublicense, and/or # sell", "without limitation the # rights to use, copy, modify, merge,", "publish, distribute, sublicense, and/or # sell copies of the Software,", "software and associated documentation files (the \"Software\"), # to deal", "modify, merge, publish, distribute, sublicense, and/or # sell copies of", "class File(Command): def __init__(self): super().__init__('file') self.log_stdout = False self.log_command =", "# all copies or substantial portions of the Software. import", "obtaining a # copy of this software and associated documentation", "is # furnished to do so, subject to the following", "including without limitation the # rights to use, copy, modify,", "to whom the Software is # furnished to do so,", "self._run(['file', '-b', '--exclude', 'ascii', path]) if self.return_code != 0: raise", "merge, publish, distribute, sublicense, and/or # sell copies of the", "Permission is hereby granted, free of charge, to any person", "furnished to do so, subject to the following conditions: #", "to do so, subject to the following conditions: # #", "# The above copyright notice and this permission notice shall", "limitation the # rights to use, copy, modify, merge, publish,", "the # rights to use, copy, modify, merge, publish, distribute,", "so, subject to the following conditions: # # The above", "'ascii', path]) if self.return_code != 0: raise FileError('\\n'.join(self.stderr)) return '\\n'.join(self.stdout)", "output = self.query(path) result = ('ELF' in output) and ('executable'", "path): self._run(['file', '-b', '--exclude', 'ascii', path]) if self.return_code != 0:", "deal in the Software without restriction, including without limitation the", "The above copyright notice and this permission notice shall be", "notice shall be included in # all copies or substantial", "portions of the Software. 
import os from .command import Command", "raise FileError('\\n'.join(self.stderr)) return '\\n'.join(self.stdout) def is_executable_elf(self, path): output = self.query(path)", "'-b', '--exclude', 'ascii', path]) if self.return_code != 0: raise FileError('\\n'.join(self.stderr))", ".command import Command class FileError(RuntimeError): pass class File(Command): def __init__(self):", "# to deal in the Software without restriction, including without", "Copyright 2020 <NAME> # # Permission is hereby granted, free", "= ('ELF' in output) and ('executable' in output) return result", "and this permission notice shall be included in # all", "self.return_code != 0: raise FileError('\\n'.join(self.stderr)) return '\\n'.join(self.stdout) def is_executable_elf(self, path):", "the following conditions: # # The above copyright notice and", "of charge, to any person obtaining a # copy of", "to any person obtaining a # copy of this software", "this software and associated documentation files (the \"Software\"), # to", "following conditions: # # The above copyright notice and this", "to permit persons to whom the Software is # furnished", "Software. import os from .command import Command class FileError(RuntimeError): pass", "# copy of this software and associated documentation files (the" ]
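A short sketch of the substring test that is_executable_elf applies to the output of file(1). The helper name looks_like_executable_elf is hypothetical, and the sample strings are typical examples of what the file command prints, not output captured here.

# Illustrative check only; it does not invoke the file command.
def looks_like_executable_elf(output: str) -> bool:
    # Same substring test used by File.is_executable_elf above.
    return ('ELF' in output) and ('executable' in output)

print(looks_like_executable_elf(
    "ELF 64-bit LSB executable, x86-64, version 1 (SYSV), dynamically linked"))  # True
print(looks_like_executable_elf(
    "ELF 64-bit LSB shared object, x86-64, version 1 (SYSV)"))                   # False
print(looks_like_executable_elf("ASCII text"))                                   # False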
[ "other urls present on the records if --select-url is given.)\")", "help=\"A file to filter urls by. Excludes urls with 'desc'", "records: record_remove_urls_not_matching(record, pattern) # sort the records by id, to", "{}, n: int): hosts = [h for h in hosts_to_urls.keys()", "def record_get_urls_matching(record: {}, pattern: str) -> [{}]: result = []", "len(hosts_to_urls[h])) for host in hosts: print(\"% 6d\\t%s\" % (len(hosts_to_urls[host]), host))", "pattern) # sort the records by id, to be extra", "records by id, to be extra sure, that we get", "filter_records_without_url(records: []) -> []: return [r for r in records", "= file.read().splitlines() for record in records: record[\"urls\"] = [url for", "records if --select-url is given.)\") parser.add_argument(\"--print-pub-date\", action=\"store_true\", help=\"Print the earliest", "pattern1: str, pattern2: str): # It should be ok, to", "try: furl_obj = furl.furl(url) if not furl_obj.host: furl_obj = furl.furl(\"http://\"", "to be extra sure, that we get the same order", "filters = file.read().splitlines() for record in records: record[\"urls\"] = [url", "as a csv column records = sorted(records, key=lambda r: r.get(\"id\"))", "in the file.\") # these are arguments to print some", "records if record_has_matching_url(r, pattern1)} ids2 = {r.get(\"id\") for r in", "(len(hosts_to_urls[host]), host)) def print_urls_for_host(hosts_to_urls: {}, host: str): urls = hosts_to_urls.get(host,", "as file: records = json.load(file) records = filter_records_without_url(records) # filter", "for url in record.get(\"urls\"): host = determine_host(url.get(\"url\")) result[host].add(url.get(\"url\")) return result", "len(ids2), \"both\": len(ids_both)}.items(): print(f\"{host}: {number}\") def record_has_matching_url(record: {}, pattern: str)", "print some information about them.\") parser.add_argument(\"scrape_file\", type=str, help=\"The file that", "len(hosts_to_urls[h]) > n] hosts = sorted(hosts, key=lambda h: len(hosts_to_urls[h])) for", "of each urls for the selected records. (Ignores other urls", "[] for year_s in year_strings: try: years.append(int(year_s)) except ValueError: print(f\"Not", "elif args.print_host_urls: print_urls_for_host(hosts_to_urls, host=args.print_host_urls) exit(0) # check in how many", "if any(r.get(\"urls\"))] def build_furl(url: str) -> furl.furl: try: furl_obj =", "furl.furl(url) if not furl_obj.host: furl_obj = furl.furl(\"http://\" + url) return", "exit if args.patterns_cooccur: host1, host2 = args.patterns_cooccur.split(\",\") print_how_often_url_patterns_cooccur(records, host1, host2)", "in filters] records = filter_records_without_url(records) # print unique hosts or", "based on a url pattern, remove all non-matching urls from", "import argparse import csv import furl import json import re", "work together select by a url pattern then print information", "{}, pattern: str) -> bool: return any(record_get_urls_matching(record, pattern)) def record_get_urls_matching(record:", "import json import re import sys from collections import defaultdict", "file to filter urls by. 
Excludes urls with 'desc' fields", "records = json.load(file) records = filter_records_without_url(records) # filter urls by", "to_print.append(\"|\".join(record.get(\"languages\", []))) writer.writerow(to_print) if __name__ == '__main__': parser = argparse.ArgumentParser(", "with 'desc' fields matching a line in the file.\") #", "a url pattern then print information about the records parser.add_argument(\"--select-by-url\",", "= [] if args.print_id: to_print.append(record.get(\"id\", \"\")) if args.print_url: to_print.append(record.get(\"urls\")[0].get(\"url\") if", "hosts that appear more than n times in the records", "to work together select by a url pattern then print", "column records = sorted(records, key=lambda r: r.get(\"id\")) writer = csv.writer(sys.stdout,", "else \"\" def main(args: argparse.Namespace): with open(args.scrape_file, \"r\") as file:", "args.select_by_url: pattern = args.select_by_url records = [r for r in", "the records parser.add_argument(\"--select-by-url\", type=str, help=\"Give a pattern for a url", "[str]) -> str: years = [] for year_s in year_strings:", "if not furl_obj.host: furl_obj = furl.furl(\"http://\" + url) return furl_obj", "url fields, then exit.\") # these are meant to work", "0: print_most_common_url_hosts(hosts_to_urls, n=args.print_common_hosts) elif args.print_host_urls: print_urls_for_host(hosts_to_urls, host=args.print_host_urls) exit(0) # check", "meant to work together select by a url pattern then", "ids1 = {r.get(\"id\") for r in records if record_has_matching_url(r, pattern1)}", "def main(args: argparse.Namespace): with open(args.scrape_file, \"r\") as file: records =", "[]) -> {str: {str}}: result = defaultdict(set) for record in", "records = filter_records_without_url(records) # print unique hosts or urls, then", "-> str: furl_obj = build_furl(url) return re.sub(r\"^www[0-9]*\\.\", \"\", furl_obj.host) def", "if len(hosts_to_urls[h]) > n] hosts = sorted(hosts, key=lambda h: len(hosts_to_urls[h]))", "-*- import argparse import csv import furl import json import", "publication year for each of the selected records.\") parser.add_argument(\"--print-id\", action=\"store_true\",", "exit(0) # do some selection based on a url pattern,", "-> bool: return any(record_get_urls_matching(record, pattern)) def record_get_urls_matching(record: {}, pattern: str)", "many records the two given hosts co-occur, then exit if", "remove all non-matching urls from the record if args.select_by_url: pattern", "a line in the file.\") # these are arguments to", "n times in the records urls, then exit.\") parser.add_argument(\"--print-host-urls\", type=str,", "type=str, help=\"The file that contains the zenon dumps as json.\")", "the selected records' ids\") parser.add_argument(\"--print-languages\", action=\"store_true\", help=\"Print the selected records'", "record_get_urls_matching(record, pattern) def earliest_year(year_strings: [str]) -> str: years = []", "[h for h in hosts_to_urls.keys() if len(hosts_to_urls[h]) > n] hosts", "def determine_host(url: str) -> str: furl_obj = build_furl(url) return re.sub(r\"^www[0-9]*\\.\",", "to_print.append(earliest_year(record.get(\"publicationDates\", []))) if args.print_languages: to_print.append(\"|\".join(record.get(\"languages\", []))) writer.writerow(to_print) if __name__ ==", "with open(args.scrape_file, \"r\") as file: records = json.load(file) records =", "determine_host(url: str) -> str: furl_obj = build_furl(url) return re.sub(r\"^www[0-9]*\\.\", \"\",", "for record in records: to_print = [] if args.print_id: 
to_print.append(record.get(\"id\",", "is a year: '{year_s}'\", file=sys.stderr) continue return str(sorted(years)[0]) if any(years)", "try: years.append(int(year_s)) except ValueError: print(f\"Not a string that is a", "parser.add_argument(\"scrape_file\", type=str, help=\"The file that contains the zenon dumps as", "= build_hosts_to_urls(records) if args.print_common_hosts >= 0: print_most_common_url_hosts(hosts_to_urls, n=args.print_common_hosts) elif args.print_host_urls:", "parser.add_argument(\"--patterns-cooccur\", type=str, help=\"Format: 'pattern1,pattern2', print how often these occur in", "to select records by.\") parser.add_argument(\"--print-url\", action=\"store_true\", help=\"Print the first of", "defaultdict(set) for record in records: for url in record.get(\"urls\"): host", "def build_furl(url: str) -> furl.furl: try: furl_obj = furl.furl(url) if", "first of each urls for the selected records. (Ignores other", "in records: for url in record.get(\"urls\"): host = determine_host(url.get(\"url\")) result[host].add(url.get(\"url\"))", "print_most_common_url_hosts(hosts_to_urls: {}, n: int): hosts = [h for h in", "furl_obj = build_furl(url) return re.sub(r\"^www[0-9]*\\.\", \"\", furl_obj.host) def build_hosts_to_urls(records: [])", "if any(years) else \"\" def main(args: argparse.Namespace): with open(args.scrape_file, \"r\")", "def record_has_matching_url(record: {}, pattern: str) -> bool: return any(record_get_urls_matching(record, pattern))", "exit(0) # check in how many records the two given", "pattern then print information about the records parser.add_argument(\"--select-by-url\", type=str, help=\"Give", "help=\"Print the first of each urls for the selected records.", "if args.select_by_url: pattern = args.select_by_url records = [r for r", "file=sys.stderr) continue return str(sorted(years)[0]) if any(years) else \"\" def main(args:", "len(ids_both)}.items(): print(f\"{host}: {number}\") def record_has_matching_url(record: {}, pattern: str) -> bool:", "year for each of the selected records.\") parser.add_argument(\"--print-id\", action=\"store_true\", help=\"Print", "in {pattern1: len(ids1), pattern2: len(ids2), \"both\": len(ids_both)}.items(): print(f\"{host}: {number}\") def", "and print some information about them.\") parser.add_argument(\"scrape_file\", type=str, help=\"The file", "select records by.\") parser.add_argument(\"--print-url\", action=\"store_true\", help=\"Print the first of each", "records: to_print = [] if args.print_id: to_print.append(record.get(\"id\", \"\")) if args.print_url:", "given hosts co-occur, then exit if args.patterns_cooccur: host1, host2 =", "-> [{}]: result = [] for url in record.get(\"urls\"): if", "str: years = [] for year_s in year_strings: try: years.append(int(year_s))", "if --select-url is given.)\") parser.add_argument(\"--print-pub-date\", action=\"store_true\", help=\"Print the earliest publication", "import csv import furl import json import re import sys", "fields, then exit.\") # these are meant to work together", "{}, pattern: str): record[\"urls\"] = record_get_urls_matching(record, pattern) def earliest_year(year_strings: [str])", "if args.print_languages: to_print.append(\"|\".join(record.get(\"languages\", []))) writer.writerow(to_print) if __name__ == '__main__': parser", "furl_obj except ValueError: return furl.furl(\"https://invalid-url.xyz\") def determine_host(url: str) -> str:", "records and print some information about them.\") parser.add_argument(\"scrape_file\", type=str, help=\"The", "record.get(\"urls\") if 
url.get(\"desc\") not in filters] records = filter_records_without_url(records) #", "pattern1)} ids2 = {r.get(\"id\") for r in records if record_has_matching_url(r,", "It should be ok, to only pattern match the hosts", "url.get(\"desc\") not in filters] records = filter_records_without_url(records) # print unique", "select by a url pattern then print information about the", "import re import sys from collections import defaultdict def filter_records_without_url(records:", "record in records: to_print = [] if args.print_id: to_print.append(record.get(\"id\", \"\"))", "filter_records_without_url(records) # print unique hosts or urls, then exit if", "sys from collections import defaultdict def filter_records_without_url(records: []) -> []:", "re.sub(r\"^www[0-9]*\\.\", \"\", furl_obj.host) def build_hosts_to_urls(records: []) -> {str: {str}}: result", "result def print_most_common_url_hosts(hosts_to_urls: {}, n: int): hosts = [h for", "any(record.get(\"urls\")) else \"\") if args.print_pub_date: to_print.append(earliest_year(record.get(\"publicationDates\", []))) if args.print_languages: to_print.append(\"|\".join(record.get(\"languages\",", "urls with 'desc' fields matching a line in the file.\")", "two given hosts co-occur, then exit if args.patterns_cooccur: host1, host2", "# do some selection based on a url pattern, remove", "exit.\") parser.add_argument(\"--patterns-cooccur\", type=str, help=\"Format: 'pattern1,pattern2', print how often these occur", "args.print_languages: to_print.append(\"|\".join(record.get(\"languages\", []))) writer.writerow(to_print) if __name__ == '__main__': parser =", "csv import furl import json import re import sys from", "if args.print_common_hosts >= 0: print_most_common_url_hosts(hosts_to_urls, n=args.print_common_hosts) elif args.print_host_urls: print_urls_for_host(hosts_to_urls, host=args.print_host_urls)", "these are arguments to print some specific information parser.add_argument(\"--print-common-hosts\", type=int,", "in record.get(\"urls\"): if any(re.findall(pattern, url.get(\"url\"))): result.append(url) return result def record_remove_urls_not_matching(record:", "in the records urls, then exit.\") parser.add_argument(\"--print-host-urls\", type=str, help=\"Print all", "file=sys.stderr) def print_how_often_url_patterns_cooccur(records: [{}], pattern1: str, pattern2: str): # It", "every time this is called # print each line as", "= [r for r in records if record_has_matching_url(r, pattern)] for", "\"r\") as file: filters = file.read().splitlines() for record in records:", "zenon dumps as json.\") parser.add_argument(\"--desc-filters\", type=str, help=\"A file to filter", "r.get(\"id\")) writer = csv.writer(sys.stdout, delimiter=\",\", quoting=csv.QUOTE_ALL) for record in records:", "= json.load(file) records = filter_records_without_url(records) # filter urls by the", "arguments to print some specific information parser.add_argument(\"--print-common-hosts\", type=int, default=-1, help=\"Print", "open(args.desc_filters, \"r\") as file: filters = file.read().splitlines() for record in", "if args.print_id: to_print.append(record.get(\"id\", \"\")) if args.print_url: to_print.append(record.get(\"urls\")[0].get(\"url\") if any(record.get(\"urls\")) else", "are arguments to print some specific information parser.add_argument(\"--print-common-hosts\", type=int, default=-1,", "determine_host(url.get(\"url\")) result[host].add(url.get(\"url\")) return result def print_most_common_url_hosts(hosts_to_urls: {}, n: int): hosts", "in year_strings: try: 
years.append(int(year_s)) except ValueError: print(f\"Not a string that", "if __name__ == '__main__': parser = argparse.ArgumentParser( description=\"Process a file", "each urls for the selected records. (Ignores other urls present", "ids2 = {r.get(\"id\") for r in records if record_has_matching_url(r, pattern2)}", "# print unique hosts or urls, then exit if args.print_host_urls", "from the record if args.select_by_url: pattern = args.select_by_url records =", "url.get(\"url\"))): result.append(url) return result def record_remove_urls_not_matching(record: {}, pattern: str): record[\"urls\"]", "return str(sorted(years)[0]) if any(years) else \"\" def main(args: argparse.Namespace): with", "for record in records: record[\"urls\"] = [url for url in", "r: r.get(\"id\")) writer = csv.writer(sys.stdout, delimiter=\",\", quoting=csv.QUOTE_ALL) for record in", "import sys from collections import defaultdict def filter_records_without_url(records: []) ->", "= [h for h in hosts_to_urls.keys() if len(hosts_to_urls[h]) > n]", "a url pattern, remove all non-matching urls from the record", "str) -> bool: return any(record_get_urls_matching(record, pattern)) def record_get_urls_matching(record: {}, pattern:", "then exit.\") # these are meant to work together select", "the records by id, to be extra sure, that we", "utf-8 -*- import argparse import csv import furl import json", "args.desc_filters: with open(args.desc_filters, \"r\") as file: filters = file.read().splitlines() for", "record.get(\"urls\"): if any(re.findall(pattern, url.get(\"url\"))): result.append(url) return result def record_remove_urls_not_matching(record: {},", "host1, host2 = args.patterns_cooccur.split(\",\") print_how_often_url_patterns_cooccur(records, host1, host2) exit(0) # do", "here... 
ids1 = {r.get(\"id\") for r in records if record_has_matching_url(r,", "'__main__': parser = argparse.ArgumentParser( description=\"Process a file with zenon json", "coding: utf-8 -*- import argparse import csv import furl import", "= {r.get(\"id\") for r in records if record_has_matching_url(r, pattern2)} ids_both", "\"\" def main(args: argparse.Namespace): with open(args.scrape_file, \"r\") as file: records", "the two given hosts co-occur, then exit if args.patterns_cooccur: host1,", "return furl.furl(\"https://invalid-url.xyz\") def determine_host(url: str) -> str: furl_obj = build_furl(url)", "these occur in single records url fields, then exit.\") #", "type=str, help=\"Print all urls for the host, then exit.\") parser.add_argument(\"--patterns-cooccur\",", "records' ids\") parser.add_argument(\"--print-languages\", action=\"store_true\", help=\"Print the selected records' languages\") main(parser.parse_args())", "description=\"Process a file with zenon json records and print some", "zenon json records and print some information about them.\") parser.add_argument(\"scrape_file\",", "records parser.add_argument(\"--select-by-url\", type=str, help=\"Give a pattern for a url to", "args.print_url: to_print.append(record.get(\"urls\")[0].get(\"url\") if any(record.get(\"urls\")) else \"\") if args.print_pub_date: to_print.append(earliest_year(record.get(\"publicationDates\", [])))", "print_urls_for_host(hosts_to_urls, host=args.print_host_urls) exit(0) # check in how many records the", "ValueError: return furl.furl(\"https://invalid-url.xyz\") def determine_host(url: str) -> str: furl_obj =", "\"\", furl_obj.host) def build_hosts_to_urls(records: []) -> {str: {str}}: result =", "type=str, help=\"Format: 'pattern1,pattern2', print how often these occur in single", "records = filter_records_without_url(records) # filter urls by the user-provided filter", "for r in records if record_has_matching_url(r, pattern)] for record in", "filter urls by. Excludes urls with 'desc' fields matching a", "= [] for url in record.get(\"urls\"): if any(re.findall(pattern, url.get(\"url\"))): result.append(url)", "collections import defaultdict def filter_records_without_url(records: []) -> []: return [r", "if args.print_url: to_print.append(record.get(\"urls\")[0].get(\"url\") if any(record.get(\"urls\")) else \"\") if args.print_pub_date: to_print.append(earliest_year(record.get(\"publicationDates\",", "urls = hosts_to_urls.get(host, []) for url in urls: print(url) if", "ValueError: print(f\"Not a string that is a year: '{year_s}'\", file=sys.stderr)", "filter list if args.desc_filters: with open(args.desc_filters, \"r\") as file: filters", "the host, then exit.\") parser.add_argument(\"--patterns-cooccur\", type=str, help=\"Format: 'pattern1,pattern2', print how", "for host in hosts: print(\"% 6d\\t%s\" % (len(hosts_to_urls[host]), host)) def", "record_has_matching_url(r, pattern1)} ids2 = {r.get(\"id\") for r in records if", "pattern2: len(ids2), \"both\": len(ids_both)}.items(): print(f\"{host}: {number}\") def record_has_matching_url(record: {}, pattern:", "n: int): hosts = [h for h in hosts_to_urls.keys() if", "hosts here... 
ids1 = {r.get(\"id\") for r in records if", "args.print_pub_date: to_print.append(earliest_year(record.get(\"publicationDates\", []))) if args.print_languages: to_print.append(\"|\".join(record.get(\"languages\", []))) writer.writerow(to_print) if __name__", "single records url fields, then exit.\") # these are meant", "record[\"urls\"] = record_get_urls_matching(record, pattern) def earliest_year(year_strings: [str]) -> str: years", "urls: print(url) if not any(urls): print(f\"No urls for host: '{host}'\",", "them.\") parser.add_argument(\"scrape_file\", type=str, help=\"The file that contains the zenon dumps", "= sorted(hosts, key=lambda h: len(hosts_to_urls[h])) for host in hosts: print(\"%", "help=\"Format: 'pattern1,pattern2', print how often these occur in single records", "check in how many records the two given hosts co-occur,", "if record_has_matching_url(r, pattern1)} ids2 = {r.get(\"id\") for r in records", "records by.\") parser.add_argument(\"--print-url\", action=\"store_true\", help=\"Print the first of each urls", "r in records if any(r.get(\"urls\"))] def build_furl(url: str) -> furl.furl:", "is called # print each line as a csv column", "for the host, then exit.\") parser.add_argument(\"--patterns-cooccur\", type=str, help=\"Format: 'pattern1,pattern2', print", "do some selection based on a url pattern, remove all", "url pattern, remove all non-matching urls from the record if", "re import sys from collections import defaultdict def filter_records_without_url(records: [])", "import furl import json import re import sys from collections", "records: for url in record.get(\"urls\"): host = determine_host(url.get(\"url\")) result[host].add(url.get(\"url\")) return", "n] hosts = sorted(hosts, key=lambda h: len(hosts_to_urls[h])) for host in", "args.print_common_hosts >= 0: hosts_to_urls = build_hosts_to_urls(records) if args.print_common_hosts >= 0:", "return re.sub(r\"^www[0-9]*\\.\", \"\", furl_obj.host) def build_hosts_to_urls(records: []) -> {str: {str}}:", "= defaultdict(set) for record in records: for url in record.get(\"urls\"):", "some information about them.\") parser.add_argument(\"scrape_file\", type=str, help=\"The file that contains", "print_how_often_url_patterns_cooccur(records: [{}], pattern1: str, pattern2: str): # It should be", "json.load(file) records = filter_records_without_url(records) # filter urls by the user-provided", "\"\") if args.print_pub_date: to_print.append(earliest_year(record.get(\"publicationDates\", []))) if args.print_languages: to_print.append(\"|\".join(record.get(\"languages\", []))) writer.writerow(to_print)", "record in records: record[\"urls\"] = [url for url in record.get(\"urls\")", "host2 = args.patterns_cooccur.split(\",\") print_how_often_url_patterns_cooccur(records, host1, host2) exit(0) # do some", "'{year_s}'\", file=sys.stderr) continue return str(sorted(years)[0]) if any(years) else \"\" def", "in records: record_remove_urls_not_matching(record, pattern) # sort the records by id,", "then exit if args.print_host_urls or args.print_common_hosts >= 0: hosts_to_urls =", "pattern) def earliest_year(year_strings: [str]) -> str: years = [] for", "{pattern1: len(ids1), pattern2: len(ids2), \"both\": len(ids_both)}.items(): print(f\"{host}: {number}\") def record_has_matching_url(record:", "str): urls = hosts_to_urls.get(host, []) for url in urls: print(url)", "any(re.findall(pattern, url.get(\"url\"))): result.append(url) return result def record_remove_urls_not_matching(record: {}, pattern: str):", "# sort the records by id, to 
be extra sure,", "any(years) else \"\" def main(args: argparse.Namespace): with open(args.scrape_file, \"r\") as", "information parser.add_argument(\"--print-common-hosts\", type=int, default=-1, help=\"Print hosts that appear more than", "parser.add_argument(\"--print-host-urls\", type=str, help=\"Print all urls for the host, then exit.\")", "-> []: return [r for r in records if any(r.get(\"urls\"))]", "host: str): urls = hosts_to_urls.get(host, []) for url in urls:", "pattern2)} ids_both = ids1.intersection(ids2) for host, number in {pattern1: len(ids1),", "def earliest_year(year_strings: [str]) -> str: years = [] for year_s", "how many records the two given hosts co-occur, then exit", "[url for url in record.get(\"urls\") if url.get(\"desc\") not in filters]", "then exit.\") parser.add_argument(\"--print-host-urls\", type=str, help=\"Print all urls for the host,", "any(record_get_urls_matching(record, pattern)) def record_get_urls_matching(record: {}, pattern: str) -> [{}]: result", "[]: return [r for r in records if any(r.get(\"urls\"))] def", "given.)\") parser.add_argument(\"--print-pub-date\", action=\"store_true\", help=\"Print the earliest publication year for each", "a file with zenon json records and print some information", "the record if args.select_by_url: pattern = args.select_by_url records = [r", "import defaultdict def filter_records_without_url(records: []) -> []: return [r for", "urls by the user-provided filter list if args.desc_filters: with open(args.desc_filters,", "hosts: print(\"% 6d\\t%s\" % (len(hosts_to_urls[host]), host)) def print_urls_for_host(hosts_to_urls: {}, host:", "file: records = json.load(file) records = filter_records_without_url(records) # filter urls", "= sorted(records, key=lambda r: r.get(\"id\")) writer = csv.writer(sys.stdout, delimiter=\",\", quoting=csv.QUOTE_ALL)", "delimiter=\",\", quoting=csv.QUOTE_ALL) for record in records: to_print = [] if", "bool: return any(record_get_urls_matching(record, pattern)) def record_get_urls_matching(record: {}, pattern: str) ->", "urls by. Excludes urls with 'desc' fields matching a line", "action=\"store_true\", help=\"Print the selected records' ids\") parser.add_argument(\"--print-languages\", action=\"store_true\", help=\"Print the", "by. Excludes urls with 'desc' fields matching a line in", "selection based on a url pattern, remove all non-matching urls", "# these are meant to work together select by a", "url in urls: print(url) if not any(urls): print(f\"No urls for", "result = [] for url in record.get(\"urls\"): if any(re.findall(pattern, url.get(\"url\"))):", "records. 
(Ignores other urls present on the records if --select-url", "some selection based on a url pattern, remove all non-matching", "action=\"store_true\", help=\"Print the first of each urls for the selected", "present on the records if --select-url is given.)\") parser.add_argument(\"--print-pub-date\", action=\"store_true\",", "[r for r in records if record_has_matching_url(r, pattern)] for record", "exit.\") parser.add_argument(\"--print-host-urls\", type=str, help=\"Print all urls for the host, then", "record_has_matching_url(r, pattern2)} ids_both = ids1.intersection(ids2) for host, number in {pattern1:", "def print_most_common_url_hosts(hosts_to_urls: {}, n: int): hosts = [h for h", "# these are arguments to print some specific information parser.add_argument(\"--print-common-hosts\",", "for h in hosts_to_urls.keys() if len(hosts_to_urls[h]) > n] hosts =", "for url in urls: print(url) if not any(urls): print(f\"No urls", "file.\") # these are arguments to print some specific information", "to_print = [] if args.print_id: to_print.append(record.get(\"id\", \"\")) if args.print_url: to_print.append(record.get(\"urls\")[0].get(\"url\")", "url in record.get(\"urls\"): host = determine_host(url.get(\"url\")) result[host].add(url.get(\"url\")) return result def", "str: furl_obj = build_furl(url) return re.sub(r\"^www[0-9]*\\.\", \"\", furl_obj.host) def build_hosts_to_urls(records:", "records if any(r.get(\"urls\"))] def build_furl(url: str) -> furl.furl: try: furl_obj", "in how many records the two given hosts co-occur, then", "pattern: str) -> [{}]: result = [] for url in", "the user-provided filter list if args.desc_filters: with open(args.desc_filters, \"r\") as", "6d\\t%s\" % (len(hosts_to_urls[host]), host)) def print_urls_for_host(hosts_to_urls: {}, host: str): urls", "matching a line in the file.\") # these are arguments", "parser.add_argument(\"--print-pub-date\", action=\"store_true\", help=\"Print the earliest publication year for each of", "specific information parser.add_argument(\"--print-common-hosts\", type=int, default=-1, help=\"Print hosts that appear more", "pattern, remove all non-matching urls from the record if args.select_by_url:", "the selected records. 
(Ignores other urls present on the records", "action=\"store_true\", help=\"Print the earliest publication year for each of the", "(Ignores other urls present on the records if --select-url is", "from collections import defaultdict def filter_records_without_url(records: []) -> []: return", "url) return furl_obj except ValueError: return furl.furl(\"https://invalid-url.xyz\") def determine_host(url: str)", "url in record.get(\"urls\"): if any(re.findall(pattern, url.get(\"url\"))): result.append(url) return result def", "year_s in year_strings: try: years.append(int(year_s)) except ValueError: print(f\"Not a string", "the zenon dumps as json.\") parser.add_argument(\"--desc-filters\", type=str, help=\"A file to", "all non-matching urls from the record if args.select_by_url: pattern =", "args.select_by_url records = [r for r in records if record_has_matching_url(r,", "that appear more than n times in the records urls,", "result.append(url) return result def record_remove_urls_not_matching(record: {}, pattern: str): record[\"urls\"] =", "times in the records urls, then exit.\") parser.add_argument(\"--print-host-urls\", type=str, help=\"Print", "help=\"Print the earliest publication year for each of the selected", "id, to be extra sure, that we get the same", "in record.get(\"urls\"): host = determine_host(url.get(\"url\")) result[host].add(url.get(\"url\")) return result def print_most_common_url_hosts(hosts_to_urls:", "filter_records_without_url(records) # filter urls by the user-provided filter list if", "for host: '{host}'\", file=sys.stderr) def print_how_often_url_patterns_cooccur(records: [{}], pattern1: str, pattern2:", ">= 0: hosts_to_urls = build_hosts_to_urls(records) if args.print_common_hosts >= 0: print_most_common_url_hosts(hosts_to_urls,", "all urls for the host, then exit.\") parser.add_argument(\"--patterns-cooccur\", type=str, help=\"Format:", "for r in records if any(r.get(\"urls\"))] def build_furl(url: str) ->", "if record_has_matching_url(r, pattern2)} ids_both = ids1.intersection(ids2) for host, number in", "records = [r for r in records if record_has_matching_url(r, pattern)]", "return result def record_remove_urls_not_matching(record: {}, pattern: str): record[\"urls\"] = record_get_urls_matching(record,", "as file: filters = file.read().splitlines() for record in records: record[\"urls\"]", "sure, that we get the same order every time this", "sorted(records, key=lambda r: r.get(\"id\")) writer = csv.writer(sys.stdout, delimiter=\",\", quoting=csv.QUOTE_ALL) for", "= argparse.ArgumentParser( description=\"Process a file with zenon json records and", "url pattern then print information about the records parser.add_argument(\"--select-by-url\", type=str,", "the selected records.\") parser.add_argument(\"--print-id\", action=\"store_true\", help=\"Print the selected records' ids\")", "help=\"Print the selected records' ids\") parser.add_argument(\"--print-languages\", action=\"store_true\", help=\"Print the selected", "earliest_year(year_strings: [str]) -> str: years = [] for year_s in", "\"r\") as file: records = json.load(file) records = filter_records_without_url(records) #", "list if args.desc_filters: with open(args.desc_filters, \"r\") as file: filters =", "= furl.furl(url) if not furl_obj.host: furl_obj = furl.furl(\"http://\" + url)", "same order every time this is called # print each", "--select-url is given.)\") parser.add_argument(\"--print-pub-date\", action=\"store_true\", help=\"Print the earliest publication year", "if any(record.get(\"urls\")) else 
\"\") if args.print_pub_date: to_print.append(earliest_year(record.get(\"publicationDates\", []))) if args.print_languages:", "main(args: argparse.Namespace): with open(args.scrape_file, \"r\") as file: records = json.load(file)", "the records urls, then exit.\") parser.add_argument(\"--print-host-urls\", type=str, help=\"Print all urls", "= ids1.intersection(ids2) for host, number in {pattern1: len(ids1), pattern2: len(ids2),", "about the records parser.add_argument(\"--select-by-url\", type=str, help=\"Give a pattern for a", "hosts_to_urls.keys() if len(hosts_to_urls[h]) > n] hosts = sorted(hosts, key=lambda h:", "more than n times in the records urls, then exit.\")", "for year_s in year_strings: try: years.append(int(year_s)) except ValueError: print(f\"Not a", "argparse.Namespace): with open(args.scrape_file, \"r\") as file: records = json.load(file) records", "print unique hosts or urls, then exit if args.print_host_urls or", "print how often these occur in single records url fields,", "if args.patterns_cooccur: host1, host2 = args.patterns_cooccur.split(\",\") print_how_often_url_patterns_cooccur(records, host1, host2) exit(0)", "record_has_matching_url(record: {}, pattern: str) -> bool: return any(record_get_urls_matching(record, pattern)) def", "in record.get(\"urls\") if url.get(\"desc\") not in filters] records = filter_records_without_url(records)", "furl_obj.host: furl_obj = furl.furl(\"http://\" + url) return furl_obj except ValueError:", "a pattern for a url to select records by.\") parser.add_argument(\"--print-url\",", "furl.furl(\"https://invalid-url.xyz\") def determine_host(url: str) -> str: furl_obj = build_furl(url) return", "{str: {str}}: result = defaultdict(set) for record in records: for", "non-matching urls from the record if args.select_by_url: pattern = args.select_by_url", "if url.get(\"desc\") not in filters] records = filter_records_without_url(records) # print", "selected records.\") parser.add_argument(\"--print-id\", action=\"store_true\", help=\"Print the selected records' ids\") parser.add_argument(\"--print-languages\",", "open(args.scrape_file, \"r\") as file: records = json.load(file) records = filter_records_without_url(records)", "= [url for url in record.get(\"urls\") if url.get(\"desc\") not in", "order every time this is called # print each line", "= record_get_urls_matching(record, pattern) def earliest_year(year_strings: [str]) -> str: years =", "year_strings: try: years.append(int(year_s)) except ValueError: print(f\"Not a string that is", "print(f\"{host}: {number}\") def record_has_matching_url(record: {}, pattern: str) -> bool: return", "build_hosts_to_urls(records) if args.print_common_hosts >= 0: print_most_common_url_hosts(hosts_to_urls, n=args.print_common_hosts) elif args.print_host_urls: print_urls_for_host(hosts_to_urls,", "furl.furl(\"http://\" + url) return furl_obj except ValueError: return furl.furl(\"https://invalid-url.xyz\") def", "return result def print_most_common_url_hosts(hosts_to_urls: {}, n: int): hosts = [h", "host: '{host}'\", file=sys.stderr) def print_how_often_url_patterns_cooccur(records: [{}], pattern1: str, pattern2: str):", "to only pattern match the hosts here... 
ids1 = {r.get(\"id\")", "if args.desc_filters: with open(args.desc_filters, \"r\") as file: filters = file.read().splitlines()", "parser.add_argument(\"--print-id\", action=\"store_true\", help=\"Print the selected records' ids\") parser.add_argument(\"--print-languages\", action=\"store_true\", help=\"Print", "a url to select records by.\") parser.add_argument(\"--print-url\", action=\"store_true\", help=\"Print the", "[]) -> []: return [r for r in records if", "def build_hosts_to_urls(records: []) -> {str: {str}}: result = defaultdict(set) for", "else \"\") if args.print_pub_date: to_print.append(earliest_year(record.get(\"publicationDates\", []))) if args.print_languages: to_print.append(\"|\".join(record.get(\"languages\", [])))", "are meant to work together select by a url pattern", "records the two given hosts co-occur, then exit if args.patterns_cooccur:", "= {r.get(\"id\") for r in records if record_has_matching_url(r, pattern1)} ids2", "build_furl(url: str) -> furl.furl: try: furl_obj = furl.furl(url) if not", "key=lambda h: len(hosts_to_urls[h])) for host in hosts: print(\"% 6d\\t%s\" %", "-> furl.furl: try: furl_obj = furl.furl(url) if not furl_obj.host: furl_obj", "str, pattern2: str): # It should be ok, to only", "= build_furl(url) return re.sub(r\"^www[0-9]*\\.\", \"\", furl_obj.host) def build_hosts_to_urls(records: []) ->", "time this is called # print each line as a", "h in hosts_to_urls.keys() if len(hosts_to_urls[h]) > n] hosts = sorted(hosts,", "urls for the host, then exit.\") parser.add_argument(\"--patterns-cooccur\", type=str, help=\"Format: 'pattern1,pattern2',", "file.read().splitlines() for record in records: record[\"urls\"] = [url for url", "help=\"Print all urls for the host, then exit.\") parser.add_argument(\"--patterns-cooccur\", type=str,", "[r for r in records if any(r.get(\"urls\"))] def build_furl(url: str)", "[]))) if args.print_languages: to_print.append(\"|\".join(record.get(\"languages\", []))) writer.writerow(to_print) if __name__ == '__main__':", "contains the zenon dumps as json.\") parser.add_argument(\"--desc-filters\", type=str, help=\"A file", "in hosts_to_urls.keys() if len(hosts_to_urls[h]) > n] hosts = sorted(hosts, key=lambda", "print each line as a csv column records = sorted(records,", "type=int, default=-1, help=\"Print hosts that appear more than n times", "record_has_matching_url(r, pattern)] for record in records: record_remove_urls_not_matching(record, pattern) # sort", "number in {pattern1: len(ids1), pattern2: len(ids2), \"both\": len(ids_both)}.items(): print(f\"{host}: {number}\")", "print some specific information parser.add_argument(\"--print-common-hosts\", type=int, default=-1, help=\"Print hosts that", "that we get the same order every time this is", "unique hosts or urls, then exit if args.print_host_urls or args.print_common_hosts", "csv.writer(sys.stdout, delimiter=\",\", quoting=csv.QUOTE_ALL) for record in records: to_print = []", "ok, to only pattern match the hosts here... 
ids1 =", "-> {str: {str}}: result = defaultdict(set) for record in records:", "== '__main__': parser = argparse.ArgumentParser( description=\"Process a file with zenon", "key=lambda r: r.get(\"id\")) writer = csv.writer(sys.stdout, delimiter=\",\", quoting=csv.QUOTE_ALL) for record", "writer = csv.writer(sys.stdout, delimiter=\",\", quoting=csv.QUOTE_ALL) for record in records: to_print", "type=str, help=\"Give a pattern for a url to select records", "furl import json import re import sys from collections import", "print_most_common_url_hosts(hosts_to_urls, n=args.print_common_hosts) elif args.print_host_urls: print_urls_for_host(hosts_to_urls, host=args.print_host_urls) exit(0) # check in", "result = defaultdict(set) for record in records: for url in", "help=\"The file that contains the zenon dumps as json.\") parser.add_argument(\"--desc-filters\",", "r in records if record_has_matching_url(r, pattern2)} ids_both = ids1.intersection(ids2) for", "year: '{year_s}'\", file=sys.stderr) continue return str(sorted(years)[0]) if any(years) else \"\"", "furl_obj.host) def build_hosts_to_urls(records: []) -> {str: {str}}: result = defaultdict(set)", "parser.add_argument(\"--desc-filters\", type=str, help=\"A file to filter urls by. Excludes urls", "filter urls by the user-provided filter list if args.desc_filters: with", "furl_obj = furl.furl(url) if not furl_obj.host: furl_obj = furl.furl(\"http://\" +", "or args.print_common_hosts >= 0: hosts_to_urls = build_hosts_to_urls(records) if args.print_common_hosts >=", "pattern)) def record_get_urls_matching(record: {}, pattern: str) -> [{}]: result =", "{r.get(\"id\") for r in records if record_has_matching_url(r, pattern1)} ids2 =", "with zenon json records and print some information about them.\")", "# -*- coding: utf-8 -*- import argparse import csv import", "get the same order every time this is called #", "help=\"Print hosts that appear more than n times in the", "selected records' ids\") parser.add_argument(\"--print-languages\", action=\"store_true\", help=\"Print the selected records' languages\")", "user-provided filter list if args.desc_filters: with open(args.desc_filters, \"r\") as file:", "csv column records = sorted(records, key=lambda r: r.get(\"id\")) writer =", "# filter urls by the user-provided filter list if args.desc_filters:", "by id, to be extra sure, that we get the", "the file.\") # these are arguments to print some specific", "print(\"% 6d\\t%s\" % (len(hosts_to_urls[host]), host)) def print_urls_for_host(hosts_to_urls: {}, host: str):", "co-occur, then exit if args.patterns_cooccur: host1, host2 = args.patterns_cooccur.split(\",\") print_how_often_url_patterns_cooccur(records,", "urls, then exit if args.print_host_urls or args.print_common_hosts >= 0: hosts_to_urls", "to filter urls by. Excludes urls with 'desc' fields matching", "except ValueError: print(f\"Not a string that is a year: '{year_s}'\",", "str) -> [{}]: result = [] for url in record.get(\"urls\"):", "for r in records if record_has_matching_url(r, pattern2)} ids_both = ids1.intersection(ids2)", "to_print.append(record.get(\"id\", \"\")) if args.print_url: to_print.append(record.get(\"urls\")[0].get(\"url\") if any(record.get(\"urls\")) else \"\") if", "json.\") parser.add_argument(\"--desc-filters\", type=str, help=\"A file to filter urls by. 
Excludes", "information about the records parser.add_argument(\"--select-by-url\", type=str, help=\"Give a pattern for", "for record in records: for url in record.get(\"urls\"): host =", "line as a csv column records = sorted(records, key=lambda r:", "hosts = [h for h in hosts_to_urls.keys() if len(hosts_to_urls[h]) >", "host = determine_host(url.get(\"url\")) result[host].add(url.get(\"url\")) return result def print_most_common_url_hosts(hosts_to_urls: {}, n:", "in urls: print(url) if not any(urls): print(f\"No urls for host:", "by a url pattern then print information about the records", "the earliest publication year for each of the selected records.\")", "= filter_records_without_url(records) # print unique hosts or urls, then exit", "file: filters = file.read().splitlines() for record in records: record[\"urls\"] =", "in records if record_has_matching_url(r, pattern1)} ids2 = {r.get(\"id\") for r", "build_furl(url) return re.sub(r\"^www[0-9]*\\.\", \"\", furl_obj.host) def build_hosts_to_urls(records: []) -> {str:", "to print some specific information parser.add_argument(\"--print-common-hosts\", type=int, default=-1, help=\"Print hosts", "not in filters] records = filter_records_without_url(records) # print unique hosts", "for record in records: record_remove_urls_not_matching(record, pattern) # sort the records", "record in records: for url in record.get(\"urls\"): host = determine_host(url.get(\"url\"))", "host, number in {pattern1: len(ids1), pattern2: len(ids2), \"both\": len(ids_both)}.items(): print(f\"{host}:", "a string that is a year: '{year_s}'\", file=sys.stderr) continue return", "hosts_to_urls = build_hosts_to_urls(records) if args.print_common_hosts >= 0: print_most_common_url_hosts(hosts_to_urls, n=args.print_common_hosts) elif", "in records if record_has_matching_url(r, pattern2)} ids_both = ids1.intersection(ids2) for host,", "return [r for r in records if any(r.get(\"urls\"))] def build_furl(url:", "ids1.intersection(ids2) for host, number in {pattern1: len(ids1), pattern2: len(ids2), \"both\":", "help=\"Give a pattern for a url to select records by.\")", "host)) def print_urls_for_host(hosts_to_urls: {}, host: str): urls = hosts_to_urls.get(host, [])", "print(f\"No urls for host: '{host}'\", file=sys.stderr) def print_how_often_url_patterns_cooccur(records: [{}], pattern1:", "pattern2: str): # It should be ok, to only pattern", "should be ok, to only pattern match the hosts here...", "records if record_has_matching_url(r, pattern)] for record in records: record_remove_urls_not_matching(record, pattern)", "as json.\") parser.add_argument(\"--desc-filters\", type=str, help=\"A file to filter urls by.", "record in records: record_remove_urls_not_matching(record, pattern) # sort the records by", "args.print_id: to_print.append(record.get(\"id\", \"\")) if args.print_url: to_print.append(record.get(\"urls\")[0].get(\"url\") if any(record.get(\"urls\")) else \"\")", "in hosts: print(\"% 6d\\t%s\" % (len(hosts_to_urls[host]), host)) def print_urls_for_host(hosts_to_urls: {},", "= csv.writer(sys.stdout, delimiter=\",\", quoting=csv.QUOTE_ALL) for record in records: to_print =", "url to select records by.\") parser.add_argument(\"--print-url\", action=\"store_true\", help=\"Print the first", "for host, number in {pattern1: len(ids1), pattern2: len(ids2), \"both\": len(ids_both)}.items():", "in records: record[\"urls\"] = [url for url in record.get(\"urls\") if", "often these occur in single records url fields, then exit.\")", "\"both\": len(ids_both)}.items(): 
print(f\"{host}: {number}\") def record_has_matching_url(record: {}, pattern: str) ->", "hosts = sorted(hosts, key=lambda h: len(hosts_to_urls[h])) for host in hosts:", "that contains the zenon dumps as json.\") parser.add_argument(\"--desc-filters\", type=str, help=\"A", "hosts or urls, then exit if args.print_host_urls or args.print_common_hosts >=", "defaultdict def filter_records_without_url(records: []) -> []: return [r for r", "if args.print_host_urls or args.print_common_hosts >= 0: hosts_to_urls = build_hosts_to_urls(records) if", "not any(urls): print(f\"No urls for host: '{host}'\", file=sys.stderr) def print_how_often_url_patterns_cooccur(records:", "the first of each urls for the selected records. (Ignores", "= hosts_to_urls.get(host, []) for url in urls: print(url) if not", "#!/usr/bin/env python3 # -*- coding: utf-8 -*- import argparse import", "Excludes urls with 'desc' fields matching a line in the", "build_hosts_to_urls(records: []) -> {str: {str}}: result = defaultdict(set) for record", "appear more than n times in the records urls, then", "about them.\") parser.add_argument(\"scrape_file\", type=str, help=\"The file that contains the zenon", "str): # It should be ok, to only pattern match", "print(url) if not any(urls): print(f\"No urls for host: '{host}'\", file=sys.stderr)", "{}, pattern: str) -> [{}]: result = [] for url", "[] if args.print_id: to_print.append(record.get(\"id\", \"\")) if args.print_url: to_print.append(record.get(\"urls\")[0].get(\"url\") if any(record.get(\"urls\"))", "these are meant to work together select by a url", "furl_obj = furl.furl(\"http://\" + url) return furl_obj except ValueError: return", "\"\")) if args.print_url: to_print.append(record.get(\"urls\")[0].get(\"url\") if any(record.get(\"urls\")) else \"\") if args.print_pub_date:", "furl.furl: try: furl_obj = furl.furl(url) if not furl_obj.host: furl_obj =", "[]))) writer.writerow(to_print) if __name__ == '__main__': parser = argparse.ArgumentParser( description=\"Process", "how often these occur in single records url fields, then", "= determine_host(url.get(\"url\")) result[host].add(url.get(\"url\")) return result def print_most_common_url_hosts(hosts_to_urls: {}, n: int):", "-*- coding: utf-8 -*- import argparse import csv import furl", "file with zenon json records and print some information about", "quoting=csv.QUOTE_ALL) for record in records: to_print = [] if args.print_id:", "ids_both = ids1.intersection(ids2) for host, number in {pattern1: len(ids1), pattern2:", "dumps as json.\") parser.add_argument(\"--desc-filters\", type=str, help=\"A file to filter urls", "any(urls): print(f\"No urls for host: '{host}'\", file=sys.stderr) def print_how_often_url_patterns_cooccur(records: [{}],", "for a url to select records by.\") parser.add_argument(\"--print-url\", action=\"store_true\", help=\"Print", "extra sure, that we get the same order every time", "argparse import csv import furl import json import re import", "pattern match the hosts here... 
ids1 = {r.get(\"id\") for r", "occur in single records url fields, then exit.\") # these", "records: record[\"urls\"] = [url for url in record.get(\"urls\") if url.get(\"desc\")", "# It should be ok, to only pattern match the", "filters] records = filter_records_without_url(records) # print unique hosts or urls,", "we get the same order every time this is called", "'{host}'\", file=sys.stderr) def print_how_often_url_patterns_cooccur(records: [{}], pattern1: str, pattern2: str): #", "the records if --select-url is given.)\") parser.add_argument(\"--print-pub-date\", action=\"store_true\", help=\"Print the", "result[host].add(url.get(\"url\")) return result def print_most_common_url_hosts(hosts_to_urls: {}, n: int): hosts =", "for r in records if record_has_matching_url(r, pattern1)} ids2 = {r.get(\"id\")", "def record_remove_urls_not_matching(record: {}, pattern: str): record[\"urls\"] = record_get_urls_matching(record, pattern) def", "with open(args.desc_filters, \"r\") as file: filters = file.read().splitlines() for record", "= args.patterns_cooccur.split(\",\") print_how_often_url_patterns_cooccur(records, host1, host2) exit(0) # do some selection", "host2) exit(0) # do some selection based on a url", "pattern = args.select_by_url records = [r for r in records", "host, then exit.\") parser.add_argument(\"--patterns-cooccur\", type=str, help=\"Format: 'pattern1,pattern2', print how often", "h: len(hosts_to_urls[h])) for host in hosts: print(\"% 6d\\t%s\" % (len(hosts_to_urls[host]),", "in single records url fields, then exit.\") # these are", "records if record_has_matching_url(r, pattern2)} ids_both = ids1.intersection(ids2) for host, number", "default=-1, help=\"Print hosts that appear more than n times in", "if record_has_matching_url(r, pattern)] for record in records: record_remove_urls_not_matching(record, pattern) #", "pattern)] for record in records: record_remove_urls_not_matching(record, pattern) # sort the", "called # print each line as a csv column records", "python3 # -*- coding: utf-8 -*- import argparse import csv", "[{}]: result = [] for url in record.get(\"urls\"): if any(re.findall(pattern,", "# check in how many records the two given hosts", "url in record.get(\"urls\") if url.get(\"desc\") not in filters] records =", "sort the records by id, to be extra sure, that", "is given.)\") parser.add_argument(\"--print-pub-date\", action=\"store_true\", help=\"Print the earliest publication year for", "host=args.print_host_urls) exit(0) # check in how many records the two", "information about them.\") parser.add_argument(\"scrape_file\", type=str, help=\"The file that contains the", "hosts_to_urls.get(host, []) for url in urls: print(url) if not any(urls):", "of the selected records.\") parser.add_argument(\"--print-id\", action=\"store_true\", help=\"Print the selected records'", "then print information about the records parser.add_argument(\"--select-by-url\", type=str, help=\"Give a", "argparse.ArgumentParser( description=\"Process a file with zenon json records and print", "years = [] for year_s in year_strings: try: years.append(int(year_s)) except", "in records if any(r.get(\"urls\"))] def build_furl(url: str) -> furl.furl: try:", "record_get_urls_matching(record: {}, pattern: str) -> [{}]: result = [] for", "urls for host: '{host}'\", file=sys.stderr) def print_how_often_url_patterns_cooccur(records: [{}], pattern1: str,", "pattern: str): record[\"urls\"] = record_get_urls_matching(record, pattern) def earliest_year(year_strings: [str]) ->", "pattern: str) -> bool: 
return any(record_get_urls_matching(record, pattern)) def record_get_urls_matching(record: {},", "host1, host2) exit(0) # do some selection based on a", "this is called # print each line as a csv", "record if args.select_by_url: pattern = args.select_by_url records = [r for", "record_remove_urls_not_matching(record: {}, pattern: str): record[\"urls\"] = record_get_urls_matching(record, pattern) def earliest_year(year_strings:", "str(sorted(years)[0]) if any(years) else \"\" def main(args: argparse.Namespace): with open(args.scrape_file,", "args.print_host_urls: print_urls_for_host(hosts_to_urls, host=args.print_host_urls) exit(0) # check in how many records", "= args.select_by_url records = [r for r in records if", "years.append(int(year_s)) except ValueError: print(f\"Not a string that is a year:", "or urls, then exit if args.print_host_urls or args.print_common_hosts >= 0:", "int): hosts = [h for h in hosts_to_urls.keys() if len(hosts_to_urls[h])", "print information about the records parser.add_argument(\"--select-by-url\", type=str, help=\"Give a pattern", "for url in record.get(\"urls\"): if any(re.findall(pattern, url.get(\"url\"))): result.append(url) return result", "{}, host: str): urls = hosts_to_urls.get(host, []) for url in", "writer.writerow(to_print) if __name__ == '__main__': parser = argparse.ArgumentParser( description=\"Process a", "parser.add_argument(\"--select-by-url\", type=str, help=\"Give a pattern for a url to select", "parser = argparse.ArgumentParser( description=\"Process a file with zenon json records", "earliest publication year for each of the selected records.\") parser.add_argument(\"--print-id\",", "if args.print_pub_date: to_print.append(earliest_year(record.get(\"publicationDates\", []))) if args.print_languages: to_print.append(\"|\".join(record.get(\"languages\", []))) writer.writerow(to_print) if", "be ok, to only pattern match the hosts here... ids1", "record[\"urls\"] = [url for url in record.get(\"urls\") if url.get(\"desc\") not", "in records: to_print = [] if args.print_id: to_print.append(record.get(\"id\", \"\")) if", "exit if args.print_host_urls or args.print_common_hosts >= 0: hosts_to_urls = build_hosts_to_urls(records)", "match the hosts here... ids1 = {r.get(\"id\") for r in", "args.print_host_urls or args.print_common_hosts >= 0: hosts_to_urls = build_hosts_to_urls(records) if args.print_common_hosts", "> n] hosts = sorted(hosts, key=lambda h: len(hosts_to_urls[h])) for host", "# print each line as a csv column records =", "for url in record.get(\"urls\") if url.get(\"desc\") not in filters] records", "line in the file.\") # these are arguments to print", "then exit.\") parser.add_argument(\"--patterns-cooccur\", type=str, help=\"Format: 'pattern1,pattern2', print how often these", "[] for url in record.get(\"urls\"): if any(re.findall(pattern, url.get(\"url\"))): result.append(url) return", "be extra sure, that we get the same order every", "def filter_records_without_url(records: []) -> []: return [r for r in", "urls from the record if args.select_by_url: pattern = args.select_by_url records", "records.\") parser.add_argument(\"--print-id\", action=\"store_true\", help=\"Print the selected records' ids\") parser.add_argument(\"--print-languages\", action=\"store_true\",", "pattern for a url to select records by.\") parser.add_argument(\"--print-url\", action=\"store_true\",", "record_remove_urls_not_matching(record, pattern) # sort the records by id, to be", "urls for the selected records. 
(Ignores other urls present on", "for the selected records. (Ignores other urls present on the", "records = sorted(records, key=lambda r: r.get(\"id\")) writer = csv.writer(sys.stdout, delimiter=\",\",", "'pattern1,pattern2', print how often these occur in single records url", "return furl_obj except ValueError: return furl.furl(\"https://invalid-url.xyz\") def determine_host(url: str) ->", "to_print.append(record.get(\"urls\")[0].get(\"url\") if any(record.get(\"urls\")) else \"\") if args.print_pub_date: to_print.append(earliest_year(record.get(\"publicationDates\", []))) if", "parser.add_argument(\"--print-url\", action=\"store_true\", help=\"Print the first of each urls for the", "by.\") parser.add_argument(\"--print-url\", action=\"store_true\", help=\"Print the first of each urls for", "on the records if --select-url is given.)\") parser.add_argument(\"--print-pub-date\", action=\"store_true\", help=\"Print", "if any(re.findall(pattern, url.get(\"url\"))): result.append(url) return result def record_remove_urls_not_matching(record: {}, pattern:", "file that contains the zenon dumps as json.\") parser.add_argument(\"--desc-filters\", type=str,", "together select by a url pattern then print information about", "the same order every time this is called # print", "r in records if record_has_matching_url(r, pattern1)} ids2 = {r.get(\"id\") for", "than n times in the records urls, then exit.\") parser.add_argument(\"--print-host-urls\",", "host in hosts: print(\"% 6d\\t%s\" % (len(hosts_to_urls[host]), host)) def print_urls_for_host(hosts_to_urls:", "a csv column records = sorted(records, key=lambda r: r.get(\"id\")) writer", "json records and print some information about them.\") parser.add_argument(\"scrape_file\", type=str,", "args.print_common_hosts >= 0: print_most_common_url_hosts(hosts_to_urls, n=args.print_common_hosts) elif args.print_host_urls: print_urls_for_host(hosts_to_urls, host=args.print_host_urls) exit(0)", "records url fields, then exit.\") # these are meant to", "str) -> str: furl_obj = build_furl(url) return re.sub(r\"^www[0-9]*\\.\", \"\", furl_obj.host)", "selected records. (Ignores other urls present on the records if", "result def record_remove_urls_not_matching(record: {}, pattern: str): record[\"urls\"] = record_get_urls_matching(record, pattern)", "= [] for year_s in year_strings: try: years.append(int(year_s)) except ValueError:", "{number}\") def record_has_matching_url(record: {}, pattern: str) -> bool: return any(record_get_urls_matching(record,", "only pattern match the hosts here... ids1 = {r.get(\"id\") for", "def print_urls_for_host(hosts_to_urls: {}, host: str): urls = hosts_to_urls.get(host, []) for", "args.patterns_cooccur.split(\",\") print_how_often_url_patterns_cooccur(records, host1, host2) exit(0) # do some selection based", "each line as a csv column records = sorted(records, key=lambda", "except ValueError: return furl.furl(\"https://invalid-url.xyz\") def determine_host(url: str) -> str: furl_obj", "record.get(\"urls\"): host = determine_host(url.get(\"url\")) result[host].add(url.get(\"url\")) return result def print_most_common_url_hosts(hosts_to_urls: {},", "+ url) return furl_obj except ValueError: return furl.furl(\"https://invalid-url.xyz\") def determine_host(url:", "[]) for url in urls: print(url) if not any(urls): print(f\"No", "type=str, help=\"A file to filter urls by. 
Excludes urls with", "= filter_records_without_url(records) # filter urls by the user-provided filter list", "each of the selected records.\") parser.add_argument(\"--print-id\", action=\"store_true\", help=\"Print the selected", "print_urls_for_host(hosts_to_urls: {}, host: str): urls = hosts_to_urls.get(host, []) for url", "0: hosts_to_urls = build_hosts_to_urls(records) if args.print_common_hosts >= 0: print_most_common_url_hosts(hosts_to_urls, n=args.print_common_hosts)", "in records if record_has_matching_url(r, pattern)] for record in records: record_remove_urls_not_matching(record,", "exit.\") # these are meant to work together select by", "if not any(urls): print(f\"No urls for host: '{host}'\", file=sys.stderr) def", "hosts co-occur, then exit if args.patterns_cooccur: host1, host2 = args.patterns_cooccur.split(\",\")", "def print_how_often_url_patterns_cooccur(records: [{}], pattern1: str, pattern2: str): # It should", "{r.get(\"id\") for r in records if record_has_matching_url(r, pattern2)} ids_both =", "r in records if record_has_matching_url(r, pattern)] for record in records:", "print(f\"Not a string that is a year: '{year_s}'\", file=sys.stderr) continue", "any(r.get(\"urls\"))] def build_furl(url: str) -> furl.furl: try: furl_obj = furl.furl(url)", "some specific information parser.add_argument(\"--print-common-hosts\", type=int, default=-1, help=\"Print hosts that appear", ">= 0: print_most_common_url_hosts(hosts_to_urls, n=args.print_common_hosts) elif args.print_host_urls: print_urls_for_host(hosts_to_urls, host=args.print_host_urls) exit(0) #", "not furl_obj.host: furl_obj = furl.furl(\"http://\" + url) return furl_obj except", "= furl.furl(\"http://\" + url) return furl_obj except ValueError: return furl.furl(\"https://invalid-url.xyz\")", "json import re import sys from collections import defaultdict def", "continue return str(sorted(years)[0]) if any(years) else \"\" def main(args: argparse.Namespace):", "then exit if args.patterns_cooccur: host1, host2 = args.patterns_cooccur.split(\",\") print_how_often_url_patterns_cooccur(records, host1,", "len(ids1), pattern2: len(ids2), \"both\": len(ids_both)}.items(): print(f\"{host}: {number}\") def record_has_matching_url(record: {},", "print_how_often_url_patterns_cooccur(records, host1, host2) exit(0) # do some selection based on", "on a url pattern, remove all non-matching urls from the", "__name__ == '__main__': parser = argparse.ArgumentParser( description=\"Process a file with", "{str}}: result = defaultdict(set) for record in records: for url", "urls present on the records if --select-url is given.)\") parser.add_argument(\"--print-pub-date\",", "fields matching a line in the file.\") # these are", "n=args.print_common_hosts) elif args.print_host_urls: print_urls_for_host(hosts_to_urls, host=args.print_host_urls) exit(0) # check in how", "parser.add_argument(\"--print-common-hosts\", type=int, default=-1, help=\"Print hosts that appear more than n", "% (len(hosts_to_urls[host]), host)) def print_urls_for_host(hosts_to_urls: {}, host: str): urls =", "[{}], pattern1: str, pattern2: str): # It should be ok,", "the hosts here... 
ids1 = {r.get(\"id\") for r in records", "str): record[\"urls\"] = record_get_urls_matching(record, pattern) def earliest_year(year_strings: [str]) -> str:", "args.patterns_cooccur: host1, host2 = args.patterns_cooccur.split(\",\") print_how_often_url_patterns_cooccur(records, host1, host2) exit(0) #", "sorted(hosts, key=lambda h: len(hosts_to_urls[h])) for host in hosts: print(\"% 6d\\t%s\"", "string that is a year: '{year_s}'\", file=sys.stderr) continue return str(sorted(years)[0])", "a year: '{year_s}'\", file=sys.stderr) continue return str(sorted(years)[0]) if any(years) else", "'desc' fields matching a line in the file.\") # these", "-> str: years = [] for year_s in year_strings: try:", "str) -> furl.furl: try: furl_obj = furl.furl(url) if not furl_obj.host:", "urls, then exit.\") parser.add_argument(\"--print-host-urls\", type=str, help=\"Print all urls for the", "return any(record_get_urls_matching(record, pattern)) def record_get_urls_matching(record: {}, pattern: str) -> [{}]:", "by the user-provided filter list if args.desc_filters: with open(args.desc_filters, \"r\")", "that is a year: '{year_s}'\", file=sys.stderr) continue return str(sorted(years)[0]) if", "for each of the selected records.\") parser.add_argument(\"--print-id\", action=\"store_true\", help=\"Print the", "records urls, then exit.\") parser.add_argument(\"--print-host-urls\", type=str, help=\"Print all urls for" ]
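A minimal sketch of how the helpers above are expected to behave, assuming the functions defined in this script; the example URLs, year strings, script name, and pattern are hypothetical:

# quick sanity checks (hypothetical inputs)
assert determine_host("https://www2.example.org/page") == "example.org"  # leading "www<digits>." is stripped
assert determine_host("example.org/page") == "example.org"               # build_furl adds a scheme when the host is missing
assert earliest_year(["2003", "n.d.", "1999"]) == "1999"                 # non-numeric strings are skipped with a warning

# example invocation (hypothetical file name and pattern):
#   python process_zenon_records.py scrape.json --select-by-url "jstor" --print-id --print-url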
# ==================================================================================================
# Copyright 2012 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================

__author__ = '<NAME>'

from twitter.pants.targets.exportable_jvm_library import ExportableJvmLibrary


class JavaAntlrLibrary(ExportableJvmLibrary):
    """Defines a target that builds java stubs from an Antlr grammar file."""

    def __init__(self, name, sources, provides=None, dependencies=None, excludes=None, compiler='antlr3'):
        """name: The name of this module target, addressable via pants via the portion of the spec
            following the colon
        sources: A list of paths containing the Antlr source files this module's jar is compiled from
        provides: An optional Dependency object indicating the ivy artifact to export
        dependencies: An optional list of Dependency objects specifying the binary (jar) dependencies
            of this module.
        excludes: An optional list of dependency exclude patterns to filter all of this module's
            transitive dependencies against.
        compiler: The name of the compiler used to compile the ANTLR files. Currently only supports
            'antlr3' and 'antlr4'"""

        ExportableJvmLibrary.__init__(self, name, sources, provides, dependencies, excludes)
        self.add_labels('codegen')

        if compiler not in ['antlr3', 'antlr4']:
            raise ValueError("Illegal value for 'compiler': {}".format(compiler))
        self.compiler = compiler

    def _as_jar_dependency(self):
        return ExportableJvmLibrary._as_jar_dependency(self).with_sources()
# See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "that builds java stubs from an Antlr grammar file.\"\"\" def", "stubs from an Antlr grammar file.\"\"\" def __init__(self, name, sources,", "\"\"\"name: The name of this module target, addressable via pants", "this work except in compliance with the License. # You", "# Unless required by applicable law or agreed to in", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "portion of the spec following the colon sources: A list", "exclude patterns to filter all of this module's transitive dependencies", "to compile the ANTLR files. Currently only supports 'antlr3' and", "An optional Dependency object indicating the The ivy artifact to", "of the compiler used to compile the ANTLR files. Currently", "the License. # You may obtain a copy of the", "for the specific language governing permissions and # limitations under", "from provides: An optional Dependency object indicating the The ivy", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "to in writing, software # distributed under the License is", "dependencies = None, excludes = None, compiler = 'antlr3'): \"\"\"name:", "Copyright 2012 Twitter, Inc. # -------------------------------------------------------------------------------------------------- # Licensed under the", "sources, provides, dependencies, excludes) self.add_labels('codegen') if compiler not in ['antlr3',", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "# See the License for the specific language governing permissions", "You may obtain a copy of the License in the", "License in the LICENSE file, or at: # # http://www.apache.org/licenses/LICENSE-2.0", "specifying the binary (jar) dependencies of this module. excludes: An", "work except in compliance with the License. # You may", "pants via the portion of the spec following the colon", "# ================================================================================================== __author__ = '<NAME>' from twitter.pants.targets.exportable_jvm_library import ExportableJvmLibrary class", "of this module. excludes: An optional list of dependency exclude", "language governing permissions and # limitations under the License. #", "or agreed to in writing, software # distributed under the", "files this module's jar is compiled from provides: An optional", "2012 Twitter, Inc. # -------------------------------------------------------------------------------------------------- # Licensed under the Apache", "required by applicable law or agreed to in writing, software", "the License. # ================================================================================================== __author__ = '<NAME>' from twitter.pants.targets.exportable_jvm_library import", "# -------------------------------------------------------------------------------------------------- # Licensed under the Apache License, Version 2.0", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "\"\"\"Defines a target that builds java stubs from an Antlr", "A list of paths containing the Antlr source files this", "you may not use this work except in compliance with", "Dependency object indicating the The ivy artifact to export dependencies:", "with the License. 
# You may obtain a copy of", "the spec following the colon sources: A list of paths", "to export dependencies: An optional list of Dependency objects specifying", "self.add_labels('codegen') if compiler not in ['antlr3', 'antlr4']: raise ValueError(\"Illegal value", "class JavaAntlrLibrary(ExportableJvmLibrary): \"\"\"Defines a target that builds java stubs from", "the Apache License, Version 2.0 (the \"License\"); # you may", "sources, provides = None, dependencies = None, excludes = None,", "name, sources, provides = None, dependencies = None, excludes =", "dependency exclude patterns to filter all of this module's transitive", "= '<NAME>' from twitter.pants.targets.exportable_jvm_library import ExportableJvmLibrary class JavaAntlrLibrary(ExportableJvmLibrary): \"\"\"Defines a", "module. excludes: An optional list of dependency exclude patterns to", "the ANTLR files. Currently only supports 'antlr3' and 'antlr4'\"\"\" ExportableJvmLibrary.__init__(self,", "Inc. # -------------------------------------------------------------------------------------------------- # Licensed under the Apache License, Version", "The ivy artifact to export dependencies: An optional list of", "files. Currently only supports 'antlr3' and 'antlr4'\"\"\" ExportableJvmLibrary.__init__(self, name, sources," ]
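The only behavior the constructor adds on top of the base class is the compiler whitelist. A standalone sketch of that fail-fast check, written here purely for illustration (the function name and defaults are mine, not pants code):

SUPPORTED_COMPILERS = ('antlr3', 'antlr4')

def validate_compiler(compiler='antlr3'):
    # Mirrors JavaAntlrLibrary's constructor-time validation: reject unknown compilers early.
    if compiler not in SUPPORTED_COMPILERS:
        raise ValueError("Illegal value for 'compiler': {}".format(compiler))
    return compiler

print(validate_compiler('antlr4'))   # antlr4
try:
    validate_compiler('antlr5')
except ValueError as err:
    print(err)                       # Illegal value for 'compiler': antlr5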
[ "def i_create_a_pca_from_dataset(step): dataset = world.dataset.get('resource') resource = world.api.create_pca(dataset, {'name': 'new", "{'name': 'new PCA'}) world.status = resource['code'] eq_(world.status, HTTP_CREATED) world.location =", "#@step(r'I create a PCA from a dataset$') def i_create_a_pca_from_dataset(step): dataset", "a PCA from a dataset$') def i_create_a_pca_from_dataset(step): dataset = world.dataset.get('resource')", "int(code1) and status['code'] != int(code2)): time.sleep(3) assert_less(datetime.utcnow() - start, timedelta(seconds=delta))", "def i_create_a_pca(step): i_create_a_pca_from_dataset(step) #@step(r'I update the PCA name to \"(.*)\"$')", "and limitations # under the License. import time import json", "import FAULTY from bigml.api import get_status from read_pca_steps import i_get_the_pca", "world.status = resource['code'] eq_(world.status, HTTP_CREATED) world.location = resource['location'] world.pca =", "i_create_a_pca_from_dataset(step) #@step(r'I update the PCA name to \"(.*)\"$') def i_update_pca_name(step,", "get_status(world.pca) while (status['code'] != int(code1) and status['code'] != int(code2)): time.sleep(3)", "FAULTY from bigml.api import get_status from read_pca_steps import i_get_the_pca #@step(r'the", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "specific language governing permissions and limitations # under the License.", "# not use this file except in compliance with the", "= world.api.update_pca(world.pca['resource'], {'name': name}) world.status = resource['code'] eq_(world.status, HTTP_ACCEPTED) world.location", "resource['location'] world.pca = resource['object'] world.pcas.append(resource['resource']) def i_create_a_pca(step): i_create_a_pca_from_dataset(step) #@step(r'I update", "in compliance with the License. You may obtain # a", "import FINISHED from bigml.api import FAULTY from bigml.api import get_status", "You may obtain # a copy of the License at", "# # Copyright 2018-2020 BigML # # Licensed under the", "#!/usr/bin/env python # # Copyright 2018-2020 BigML # # Licensed", "i_get_the_pca(step, pca_id) status = get_status(world.pca) eq_(status['code'], int(code1)) #@step(r'I wait until", "timedelta from world import world from nose.tools import eq_, assert_less", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "bigml.api import get_status from read_pca_steps import i_get_the_pca #@step(r'the pca name", "permissions and limitations # under the License. import time import", "i_create_a_pca_from_dataset(step): dataset = world.dataset.get('resource') resource = world.api.create_pca(dataset, {'name': 'new PCA'})", "under the License is distributed on an \"AS IS\" BASIS,", "world.location = resource['location'] world.pca = resource['object'] world.pcas.append(resource['resource']) def i_create_a_pca(step): i_create_a_pca_from_dataset(step)", "(\\d+)') def wait_until_pca_status_code_is(step, code1, code2, secs): start = datetime.utcnow() delta", "resource['code'] eq_(world.status, HTTP_CREATED) world.location = resource['location'] world.pca = resource['object'] world.pcas.append(resource['resource'])", "code is either (\\d) or (-\\d) less than (\\d+)') def", "= datetime.utcnow() delta = int(secs) * world.delta pca_id = world.pca['resource']", "this file except in compliance with the License. 
You may", "time import json import os from datetime import datetime, timedelta", "pca_id = world.pca['resource'] i_get_the_pca(step, pca_id) status = get_status(world.pca) while (status['code']", "name): resource = world.api.update_pca(world.pca['resource'], {'name': name}) world.status = resource['code'] eq_(world.status,", "-*- #!/usr/bin/env python # # Copyright 2018-2020 BigML # #", "from bigml.api import FAULTY from bigml.api import get_status from read_pca_steps", "the PCA name to \"(.*)\"$') def i_update_pca_name(step, name): resource =", "software # distributed under the License is distributed on an", "# -*- coding: utf-8 -*- #!/usr/bin/env python # # Copyright", "(the \"License\"); you may # not use this file except", "import HTTP_CREATED from bigml.api import HTTP_ACCEPTED from bigml.api import FINISHED", "the PCA status code is either (\\d) or (-\\d) less", "file except in compliance with the License. You may obtain", "to \"(.*)\"$') def i_update_pca_name(step, name): resource = world.api.update_pca(world.pca['resource'], {'name': name})", "OR CONDITIONS OF ANY KIND, either express or implied. See", "the specific language governing permissions and limitations # under the", "create a PCA from a dataset$') def i_create_a_pca_from_dataset(step): dataset =", "until the PCA is ready less than (\\d+)') def the_pca_is_finished_in_less_than(step,", "under the Apache License, Version 2.0 (the \"License\"); you may", "limitations # under the License. import time import json import", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "\"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "to in writing, software # distributed under the License is", "name): pca_name = world.pca['name'] eq_(name, pca_name) #@step(r'I create a PCA", "datetime import datetime, timedelta from world import world from nose.tools", "or agreed to in writing, software # distributed under the", "world from nose.tools import eq_, assert_less from bigml.api import HTTP_CREATED", "required by applicable law or agreed to in writing, software", "= get_status(world.pca) while (status['code'] != int(code1) and status['code'] != int(code2)):", "nose.tools import eq_, assert_less from bigml.api import HTTP_CREATED from bigml.api", "Apache License, Version 2.0 (the \"License\"); you may # not", "PCA is ready less than (\\d+)') def the_pca_is_finished_in_less_than(step, secs): wait_until_pca_status_code_is(step,", "world.pcas.append(resource['resource']) #@step(r'I create a PCA from a dataset$') def i_create_a_pca_with_params(step,", "agreed to in writing, software # distributed under the License", "bigml.api import FAULTY from bigml.api import get_status from read_pca_steps import", "distributed under the License is distributed on an \"AS IS\"", "a dataset$') def i_create_a_pca_from_dataset(step): dataset = world.dataset.get('resource') resource = world.api.create_pca(dataset,", "name is \"(.*)\"') def i_check_pca_name(step, name): pca_name = world.pca['name'] eq_(name,", "License, Version 2.0 (the \"License\"); you may # not use", "CONDITIONS OF ANY KIND, either express or implied. 
See the", "ready less than (\\d+)') def the_pca_is_finished_in_less_than(step, secs): wait_until_pca_status_code_is(step, FINISHED, FAULTY,", "from bigml.api import FINISHED from bigml.api import FAULTY from bigml.api", "not use this file except in compliance with the License.", "writing, software # distributed under the License is distributed on", "eq_(world.status, HTTP_CREATED) world.location = resource['location'] world.pca = resource['object'] world.pcas.append(resource['resource']) #@step(r'I", "pca_id) status = get_status(world.pca) eq_(status['code'], int(code1)) #@step(r'I wait until the", "PCA from a dataset$') def i_create_a_pca_with_params(step, params): params = json.loads(params)", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "the License. You may obtain # a copy of the", "governing permissions and limitations # under the License. import time", "an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF", "use this file except in compliance with the License. You", "resource['object'] world.pcas.append(resource['resource']) def i_create_a_pca(step): i_create_a_pca_from_dataset(step) #@step(r'I update the PCA name", "import world from nose.tools import eq_, assert_less from bigml.api import", "world.api.create_pca(dataset, {'name': 'new PCA'}) world.status = resource['code'] eq_(world.status, HTTP_CREATED) world.location", "wait_until_pca_status_code_is(step, code1, code2, secs): start = datetime.utcnow() delta = int(secs)", "less than (\\d+)') def the_pca_is_finished_in_less_than(step, secs): wait_until_pca_status_code_is(step, FINISHED, FAULTY, secs)", "world.pca['name'] eq_(name, pca_name) #@step(r'I create a PCA from a dataset$')", "License is distributed on an \"AS IS\" BASIS, WITHOUT #", "KIND, either express or implied. See the # License for", "eq_(world.status, HTTP_CREATED) world.location = resource['location'] world.pca = resource['object'] world.pcas.append(resource['resource']) def", "os from datetime import datetime, timedelta from world import world", "while (status['code'] != int(code1) and status['code'] != int(code2)): time.sleep(3) assert_less(datetime.utcnow()", "world.delta pca_id = world.pca['resource'] i_get_the_pca(step, pca_id) status = get_status(world.pca) while", "\"License\"); you may # not use this file except in", "!= int(code1) and status['code'] != int(code2)): time.sleep(3) assert_less(datetime.utcnow() - start,", "IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND,", "express or implied. 
See the # License for the specific", "resource = world.api.create_pca(dataset, params) world.status = resource['code'] eq_(world.status, HTTP_CREATED) world.location", "eq_, assert_less from bigml.api import HTTP_CREATED from bigml.api import HTTP_ACCEPTED", "the Apache License, Version 2.0 (the \"License\"); you may #", "from nose.tools import eq_, assert_less from bigml.api import HTTP_CREATED from", "world.pca = resource['object'] #@step(r'I wait until the PCA status code", "See the # License for the specific language governing permissions", "start, timedelta(seconds=delta)) i_get_the_pca(step, pca_id) status = get_status(world.pca) eq_(status['code'], int(code1)) #@step(r'I", "code2, secs): start = datetime.utcnow() delta = int(secs) * world.delta", "= resource['object'] world.pcas.append(resource['resource']) #@step(r'I create a PCA from a dataset$')", "# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "\"(.*)\"') def i_check_pca_name(step, name): pca_name = world.pca['name'] eq_(name, pca_name) #@step(r'I", "HTTP_CREATED from bigml.api import HTTP_ACCEPTED from bigml.api import FINISHED from", "#@step(r'I wait until the PCA status code is either (\\d)", "name to \"(.*)\"$') def i_update_pca_name(step, name): resource = world.api.update_pca(world.pca['resource'], {'name':", "time.sleep(3) assert_less(datetime.utcnow() - start, timedelta(seconds=delta)) i_get_the_pca(step, pca_id) status = get_status(world.pca)", "law or agreed to in writing, software # distributed under", "implied. See the # License for the specific language governing", "import time import json import os from datetime import datetime,", "= resource['location'] world.pca = resource['object'] world.pcas.append(resource['resource']) def i_create_a_pca(step): i_create_a_pca_from_dataset(step) #@step(r'I", "params = json.loads(params) dataset = world.dataset.get('resource') resource = world.api.create_pca(dataset, params)", "status = get_status(world.pca) while (status['code'] != int(code1) and status['code'] !=", "int(code1)) #@step(r'I wait until the PCA is ready less than", "from world import world from nose.tools import eq_, assert_less from", "pca name is \"(.*)\"') def i_check_pca_name(step, name): pca_name = world.pca['name']", "pca_id) status = get_status(world.pca) while (status['code'] != int(code1) and status['code']", "create a PCA from a dataset$') def i_create_a_pca_with_params(step, params): params", "datetime, timedelta from world import world from nose.tools import eq_,", "is \"(.*)\"') def i_check_pca_name(step, name): pca_name = world.pca['name'] eq_(name, pca_name)", "PCA'}) world.status = resource['code'] eq_(world.status, HTTP_CREATED) world.location = resource['location'] world.pca", "def wait_until_pca_status_code_is(step, code1, code2, secs): start = datetime.utcnow() delta =", "= world.dataset.get('resource') resource = world.api.create_pca(dataset, params) world.status = resource['code'] eq_(world.status,", "until the PCA status code is either (\\d) or (-\\d)", "eq_(name, pca_name) #@step(r'I create a PCA from a dataset$') def", "HTTP_CREATED) world.location = resource['location'] world.pca = resource['object'] world.pcas.append(resource['resource']) #@step(r'I create", "'new PCA'}) world.status = resource['code'] eq_(world.status, HTTP_CREATED) world.location = resource['location']", "dataset = world.dataset.get('resource') resource = world.api.create_pca(dataset, {'name': 'new PCA'}) world.status", "#@step(r'I update the PCA name to \"(.*)\"$') def 
i_update_pca_name(step, name):", "status = get_status(world.pca) eq_(status['code'], int(code1)) #@step(r'I wait until the PCA", "less than (\\d+)') def wait_until_pca_status_code_is(step, code1, code2, secs): start =", "name}) world.status = resource['code'] eq_(world.status, HTTP_ACCEPTED) world.location = resource['location'] world.pca", "= resource['location'] world.pca = resource['object'] world.pcas.append(resource['resource']) #@step(r'I create a PCA", "PCA from a dataset$') def i_create_a_pca_from_dataset(step): dataset = world.dataset.get('resource') resource", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR", "# # Licensed under the Apache License, Version 2.0 (the", "either (\\d) or (-\\d) less than (\\d+)') def wait_until_pca_status_code_is(step, code1,", "= int(secs) * world.delta pca_id = world.pca['resource'] i_get_the_pca(step, pca_id) status", "obtain # a copy of the License at # #", "json import os from datetime import datetime, timedelta from world", "BigML # # Licensed under the Apache License, Version 2.0", "HTTP_CREATED) world.location = resource['location'] world.pca = resource['object'] world.pcas.append(resource['resource']) def i_create_a_pca(step):", "Version 2.0 (the \"License\"); you may # not use this", "bigml.api import HTTP_CREATED from bigml.api import HTTP_ACCEPTED from bigml.api import", "# Copyright 2018-2020 BigML # # Licensed under the Apache", "get_status(world.pca) eq_(status['code'], int(code1)) #@step(r'I wait until the PCA is ready", "License for the specific language governing permissions and limitations #", "a PCA from a dataset$') def i_create_a_pca_with_params(step, params): params =", "assert_less from bigml.api import HTTP_CREATED from bigml.api import HTTP_ACCEPTED from", "start = datetime.utcnow() delta = int(secs) * world.delta pca_id =", "= resource['location'] world.pca = resource['object'] #@step(r'I wait until the PCA", "on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS", "resource['location'] world.pca = resource['object'] world.pcas.append(resource['resource']) #@step(r'I create a PCA from", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "from bigml.api import HTTP_ACCEPTED from bigml.api import FINISHED from bigml.api", "{'name': name}) world.status = resource['code'] eq_(world.status, HTTP_ACCEPTED) world.location = resource['location']", "world.pcas.append(resource['resource']) def i_create_a_pca(step): i_create_a_pca_from_dataset(step) #@step(r'I update the PCA name to", "FINISHED from bigml.api import FAULTY from bigml.api import get_status from", "Licensed under the Apache License, Version 2.0 (the \"License\"); you", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "world.location = resource['location'] world.pca = resource['object'] #@step(r'I wait until the", "assert_less(datetime.utcnow() - start, timedelta(seconds=delta)) i_get_the_pca(step, pca_id) status = get_status(world.pca) eq_(status['code'],", "import json import os from datetime import datetime, timedelta from", "world.api.create_pca(dataset, params) world.status = resource['code'] eq_(world.status, HTTP_CREATED) world.location = resource['location']", "wait until the PCA is ready less than (\\d+)') def", "the PCA is ready less than (\\d+)') def the_pca_is_finished_in_less_than(step, secs):", "dataset$') def i_create_a_pca_with_params(step, params): params = json.loads(params) 
dataset = world.dataset.get('resource')", "read_pca_steps import i_get_the_pca #@step(r'the pca name is \"(.*)\"') def i_check_pca_name(step,", "2018-2020 BigML # # Licensed under the Apache License, Version", "#@step(r'the pca name is \"(.*)\"') def i_check_pca_name(step, name): pca_name =", "compliance with the License. You may obtain # a copy", "params) world.status = resource['code'] eq_(world.status, HTTP_CREATED) world.location = resource['location'] world.pca", "and status['code'] != int(code2)): time.sleep(3) assert_less(datetime.utcnow() - start, timedelta(seconds=delta)) i_get_the_pca(step,", "dataset$') def i_create_a_pca_from_dataset(step): dataset = world.dataset.get('resource') resource = world.api.create_pca(dataset, {'name':", "or (-\\d) less than (\\d+)') def wait_until_pca_status_code_is(step, code1, code2, secs):", "i_update_pca_name(step, name): resource = world.api.update_pca(world.pca['resource'], {'name': name}) world.status = resource['code']", "HTTP_ACCEPTED) world.location = resource['location'] world.pca = resource['object'] #@step(r'I wait until", "world.pca = resource['object'] world.pcas.append(resource['resource']) #@step(r'I create a PCA from a", "i_create_a_pca_with_params(step, params): params = json.loads(params) dataset = world.dataset.get('resource') resource =", "the # License for the specific language governing permissions and", "# # Unless required by applicable law or agreed to", "update the PCA name to \"(.*)\"$') def i_update_pca_name(step, name): resource", "resource['object'] #@step(r'I wait until the PCA status code is either", "import os from datetime import datetime, timedelta from world import", "\"(.*)\"$') def i_update_pca_name(step, name): resource = world.api.update_pca(world.pca['resource'], {'name': name}) world.status", "eq_(world.status, HTTP_ACCEPTED) world.location = resource['location'] world.pca = resource['object'] #@step(r'I wait", "2.0 (the \"License\"); you may # not use this file", "from datetime import datetime, timedelta from world import world from", "= get_status(world.pca) eq_(status['code'], int(code1)) #@step(r'I wait until the PCA is", "-*- coding: utf-8 -*- #!/usr/bin/env python # # Copyright 2018-2020", "world.dataset.get('resource') resource = world.api.create_pca(dataset, {'name': 'new PCA'}) world.status = resource['code']", "by applicable law or agreed to in writing, software #", "= resource['code'] eq_(world.status, HTTP_CREATED) world.location = resource['location'] world.pca = resource['object']", "than (\\d+)') def wait_until_pca_status_code_is(step, code1, code2, secs): start = datetime.utcnow()", "dataset = world.dataset.get('resource') resource = world.api.create_pca(dataset, params) world.status = resource['code']", "datetime.utcnow() delta = int(secs) * world.delta pca_id = world.pca['resource'] i_get_the_pca(step,", "BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either", "bigml.api import FINISHED from bigml.api import FAULTY from bigml.api import", "PCA name to \"(.*)\"$') def i_update_pca_name(step, name): resource = world.api.update_pca(world.pca['resource'],", "License. import time import json import os from datetime import", "(\\d) or (-\\d) less than (\\d+)') def wait_until_pca_status_code_is(step, code1, code2,", "secs): start = datetime.utcnow() delta = int(secs) * world.delta pca_id", "# under the License. 
import time import json import os", "import get_status from read_pca_steps import i_get_the_pca #@step(r'the pca name is", "= resource['code'] eq_(world.status, HTTP_ACCEPTED) world.location = resource['location'] world.pca = resource['object']", "#@step(r'I create a PCA from a dataset$') def i_create_a_pca_with_params(step, params):", "pca_name) #@step(r'I create a PCA from a dataset$') def i_create_a_pca_from_dataset(step):", "may obtain # a copy of the License at #", "resource['object'] world.pcas.append(resource['resource']) #@step(r'I create a PCA from a dataset$') def", "(-\\d) less than (\\d+)') def wait_until_pca_status_code_is(step, code1, code2, secs): start", "Unless required by applicable law or agreed to in writing,", "* world.delta pca_id = world.pca['resource'] i_get_the_pca(step, pca_id) status = get_status(world.pca)", "eq_(status['code'], int(code1)) #@step(r'I wait until the PCA is ready less", "is ready less than (\\d+)') def the_pca_is_finished_in_less_than(step, secs): wait_until_pca_status_code_is(step, FINISHED,", "code1, code2, secs): start = datetime.utcnow() delta = int(secs) *", "wait until the PCA status code is either (\\d) or", "the License. import time import json import os from datetime", "HTTP_ACCEPTED from bigml.api import FINISHED from bigml.api import FAULTY from", "= world.pca['name'] eq_(name, pca_name) #@step(r'I create a PCA from a", "applicable law or agreed to in writing, software # distributed", "bigml.api import HTTP_ACCEPTED from bigml.api import FINISHED from bigml.api import", "OF ANY KIND, either express or implied. See the #", "world.pca = resource['object'] world.pcas.append(resource['resource']) def i_create_a_pca(step): i_create_a_pca_from_dataset(step) #@step(r'I update the", "from bigml.api import get_status from read_pca_steps import i_get_the_pca #@step(r'the pca", "a dataset$') def i_create_a_pca_with_params(step, params): params = json.loads(params) dataset =", "WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express", "in writing, software # distributed under the License is distributed", "pca_name = world.pca['name'] eq_(name, pca_name) #@step(r'I create a PCA from", "resource['location'] world.pca = resource['object'] #@step(r'I wait until the PCA status", "from bigml.api import HTTP_CREATED from bigml.api import HTTP_ACCEPTED from bigml.api", "Copyright 2018-2020 BigML # # Licensed under the Apache License,", "from a dataset$') def i_create_a_pca_from_dataset(step): dataset = world.dataset.get('resource') resource =", "from read_pca_steps import i_get_the_pca #@step(r'the pca name is \"(.*)\"') def", "from a dataset$') def i_create_a_pca_with_params(step, params): params = json.loads(params) dataset", "status['code'] != int(code2)): time.sleep(3) assert_less(datetime.utcnow() - start, timedelta(seconds=delta)) i_get_the_pca(step, pca_id)", "utf-8 -*- #!/usr/bin/env python # # Copyright 2018-2020 BigML #", "resource = world.api.update_pca(world.pca['resource'], {'name': name}) world.status = resource['code'] eq_(world.status, HTTP_ACCEPTED)", "either express or implied. 
See the # License for the", "world.pca['resource'] i_get_the_pca(step, pca_id) status = get_status(world.pca) while (status['code'] != int(code1)", "= world.api.create_pca(dataset, {'name': 'new PCA'}) world.status = resource['code'] eq_(world.status, HTTP_CREATED)", "def i_check_pca_name(step, name): pca_name = world.pca['name'] eq_(name, pca_name) #@step(r'I create", "python # # Copyright 2018-2020 BigML # # Licensed under", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "world.api.update_pca(world.pca['resource'], {'name': name}) world.status = resource['code'] eq_(world.status, HTTP_ACCEPTED) world.location =", "may # not use this file except in compliance with", "= world.pca['resource'] i_get_the_pca(step, pca_id) status = get_status(world.pca) while (status['code'] !=", "i_get_the_pca(step, pca_id) status = get_status(world.pca) while (status['code'] != int(code1) and", "# License for the specific language governing permissions and limitations", "with the License. You may obtain # a copy of", "you may # not use this file except in compliance", "#@step(r'I wait until the PCA is ready less than (\\d+)')", "= resource['object'] #@step(r'I wait until the PCA status code is", "status code is either (\\d) or (-\\d) less than (\\d+)')", "import HTTP_ACCEPTED from bigml.api import FINISHED from bigml.api import FAULTY", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "import eq_, assert_less from bigml.api import HTTP_CREATED from bigml.api import", "i_check_pca_name(step, name): pca_name = world.pca['name'] eq_(name, pca_name) #@step(r'I create a", "# WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "world.status = resource['code'] eq_(world.status, HTTP_ACCEPTED) world.location = resource['location'] world.pca =", "!= int(code2)): time.sleep(3) assert_less(datetime.utcnow() - start, timedelta(seconds=delta)) i_get_the_pca(step, pca_id) status", "the License is distributed on an \"AS IS\" BASIS, WITHOUT", "= resource['object'] world.pcas.append(resource['resource']) def i_create_a_pca(step): i_create_a_pca_from_dataset(step) #@step(r'I update the PCA", "int(secs) * world.delta pca_id = world.pca['resource'] i_get_the_pca(step, pca_id) status =", "coding: utf-8 -*- #!/usr/bin/env python # # Copyright 2018-2020 BigML", "timedelta(seconds=delta)) i_get_the_pca(step, pca_id) status = get_status(world.pca) eq_(status['code'], int(code1)) #@step(r'I wait", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "for the specific language governing permissions and limitations # under", "resource['code'] eq_(world.status, HTTP_ACCEPTED) world.location = resource['location'] world.pca = resource['object'] #@step(r'I", "import i_get_the_pca #@step(r'the pca name is \"(.*)\"') def i_check_pca_name(step, name):", "except in compliance with the License. You may obtain #", "i_create_a_pca(step): i_create_a_pca_from_dataset(step) #@step(r'I update the PCA name to \"(.*)\"$') def", "language governing permissions and limitations # under the License. import", "License. You may obtain # a copy of the License", "int(code2)): time.sleep(3) assert_less(datetime.utcnow() - start, timedelta(seconds=delta)) i_get_the_pca(step, pca_id) status =", "ANY KIND, either express or implied. See the # License", "# distributed under the License is distributed on an \"AS", "under the License. 
import time import json import os from", "# Unless required by applicable law or agreed to in", "resource = world.api.create_pca(dataset, {'name': 'new PCA'}) world.status = resource['code'] eq_(world.status,", "json.loads(params) dataset = world.dataset.get('resource') resource = world.api.create_pca(dataset, params) world.status =", "is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES", "- start, timedelta(seconds=delta)) i_get_the_pca(step, pca_id) status = get_status(world.pca) eq_(status['code'], int(code1))", "def i_create_a_pca_with_params(step, params): params = json.loads(params) dataset = world.dataset.get('resource') resource", "PCA status code is either (\\d) or (-\\d) less than", "import datetime, timedelta from world import world from nose.tools import", "= world.dataset.get('resource') resource = world.api.create_pca(dataset, {'name': 'new PCA'}) world.status =", "= json.loads(params) dataset = world.dataset.get('resource') resource = world.api.create_pca(dataset, params) world.status", "is either (\\d) or (-\\d) less than (\\d+)') def wait_until_pca_status_code_is(step,", "def i_update_pca_name(step, name): resource = world.api.update_pca(world.pca['resource'], {'name': name}) world.status =", "delta = int(secs) * world.delta pca_id = world.pca['resource'] i_get_the_pca(step, pca_id)", "= world.api.create_pca(dataset, params) world.status = resource['code'] eq_(world.status, HTTP_CREATED) world.location =", "i_get_the_pca #@step(r'the pca name is \"(.*)\"') def i_check_pca_name(step, name): pca_name", "(status['code'] != int(code1) and status['code'] != int(code2)): time.sleep(3) assert_less(datetime.utcnow() -", "get_status from read_pca_steps import i_get_the_pca #@step(r'the pca name is \"(.*)\"')", "world import world from nose.tools import eq_, assert_less from bigml.api", "world.location = resource['location'] world.pca = resource['object'] world.pcas.append(resource['resource']) #@step(r'I create a", "world.dataset.get('resource') resource = world.api.create_pca(dataset, params) world.status = resource['code'] eq_(world.status, HTTP_CREATED)", "params): params = json.loads(params) dataset = world.dataset.get('resource') resource = world.api.create_pca(dataset,", "or implied. See the # License for the specific language" ]
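The wait_until_pca_status_code_is helper above is a poll-until-terminal-state loop with a deadline. A minimal standalone sketch of the same pattern, assuming nothing from the BigML test harness (the stubbed codes 5 and -1 only stand in for FINISHED and FAULTY here):

import time
from datetime import datetime, timedelta

def wait_for_status(get_code, finished, faulty, seconds, poll_every=3):
    # Poll get_code() until it reports one of the two terminal codes or the deadline passes.
    deadline = datetime.utcnow() + timedelta(seconds=seconds)
    code = get_code()
    while code not in (finished, faulty):
        assert datetime.utcnow() < deadline, "resource did not finish in time"
        time.sleep(poll_every)
        code = get_code()
    assert code == finished, "resource ended in a faulty state"

# Usage with a stubbed status source that finishes on the third poll:
codes = iter([1, 1, 5])
wait_for_status(lambda: next(codes), finished=5, faulty=-1, seconds=30, poll_every=0)
print("resource finished")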
[ "'Back'] admins = ['admin1_telegram_nickname', 'admin2_telegram_nickname'] avail_days = [] TOKEN =", "['admin1_telegram_nickname', 'admin2_telegram_nickname'] avail_days = [] TOKEN = 'bot_<PASSWORD>' group_id =", "= '' options = ['Info', 'Check-in/Out', 'Edit games', 'Back'] admins", "'Check-in/Out', 'Edit games', 'Back'] admins = ['admin1_telegram_nickname', 'admin2_telegram_nickname'] avail_days =", "['Info', 'Check-in/Out', 'Edit games', 'Back'] admins = ['admin1_telegram_nickname', 'admin2_telegram_nickname'] avail_days", "'admin2_telegram_nickname'] avail_days = [] TOKEN = 'bot_<PASSWORD>' group_id = id_of_group_chat", "admins = ['admin1_telegram_nickname', 'admin2_telegram_nickname'] avail_days = [] TOKEN = 'bot_<PASSWORD>'", "operation = '' options = ['Info', 'Check-in/Out', 'Edit games', 'Back']", "games', 'Back'] admins = ['admin1_telegram_nickname', 'admin2_telegram_nickname'] avail_days = [] TOKEN", "days_of_week = ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday', 'Sunday'] operation = '' options = ['Info',", "'Edit games', 'Back'] admins = ['admin1_telegram_nickname', 'admin2_telegram_nickname'] avail_days = []", "= ['admin1_telegram_nickname', 'admin2_telegram_nickname'] avail_days = [] TOKEN = 'bot_<PASSWORD>' group_id", "'Sunday'] operation = '' options = ['Info', 'Check-in/Out', 'Edit games',", "'' options = ['Info', 'Check-in/Out', 'Edit games', 'Back'] admins =", "options = ['Info', 'Check-in/Out', 'Edit games', 'Back'] admins = ['admin1_telegram_nickname',", "= ['Info', 'Check-in/Out', 'Edit games', 'Back'] admins = ['admin1_telegram_nickname', 'admin2_telegram_nickname']", "<filename>config.py days_of_week = ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday', 'Sunday'] operation = '' options =", "= ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday', 'Sunday'] operation = '' options = ['Info', 'Check-in/Out',", "['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday', 'Sunday'] operation = '' options = ['Info', 'Check-in/Out', 'Edit" ]
[ "you like? (sourdough, rye, wheat, or white)\\n' bread_type = ip.inputChoice(['sourdough',", "wheat, or white)\\n' bread_type = ip.inputChoice(['sourdough', 'rye', 'wheat', 'white'], prompt=bread_prompt)", "of protein would you like? (chicken, turkey, ham, or tofu)\\n'", "tomato, lettuce]) for item in sandwich: cost += get_cost(item) else:", "validate user input for sandwich preferences ''' import pyinputplus as", "= ip.inputYesNo(prompt='Would you like lettuce?\\n') like_cheese = ip.inputYesNo(prompt='Do you like", "prompt=protein_prompt) mayo = ip.inputYesNo(prompt='Would you like mayo?\\n') mustard = ip.inputYesNo(prompt='Would", "'cheddar':2.0, 'swiss':2.5, 'mozzarella':2.5, 'yes':0.25, # toppings return 'yes' in sandwich_builder(),", "item in sandwich: cost += get_cost(item) how_many_prompt = 'How many", "} return food_dict[food_name] def sandwich_builder(): print('Enter your sandwich preferences below:\\n')", "0.25 'no':0 # saying no to a topping costs nothing", "like cheese on your sandwich?\\n') if like_cheese is 'yes': cheese_prompt", "topping costs nothing } return food_dict[food_name] def sandwich_builder(): print('Enter your", "cheese on your sandwich?\\n') if like_cheese is 'yes': cheese_prompt =", "cost 0.25 'no':0 # saying no to a topping costs", "sandwich_builder(): print('Enter your sandwich preferences below:\\n') bread_prompt = 'What bread", "you like tomato?\\n') lettuce = ip.inputYesNo(prompt='Would you like lettuce?\\n') like_cheese", "'yes':0.25, # toppings return 'yes' in sandwich_builder(), so I made", "your sandwich preferences below:\\n') bread_prompt = 'What bread type would", "mayo = ip.inputYesNo(prompt='Would you like mayo?\\n') mustard = ip.inputYesNo(prompt='Would you", "'white':1.25, 'chicken':2.0, 'turkey':1.50, 'ham':2.0, 'tofu':1.25, 'cheddar':2.0, 'swiss':2.5, 'mozzarella':2.5, 'yes':0.25, #", "like_cheese is 'yes': cheese_prompt = 'What kind of cheese would", "costs nothing } return food_dict[food_name] def sandwich_builder(): print('Enter your sandwich", "sandwich = [] cost = 0 sandwich.extend([bread_type, protein_type, cheese_type, mayo,", "toppings return 'yes' in sandwich_builder(), so I made them all", "protein_type = ip.inputChoice(['chicken', 'turkey', 'ham', 'tofu'], prompt=protein_prompt) mayo = ip.inputYesNo(prompt='Would", "of items in sandwich_builder''' food_dict = { 'sourdough':1.75, 'rye':2.0, 'wheat':1.50,", "= 'What kind of cheese would you like? (cheddar, swiss,", "# saying no to a topping costs nothing } return", "0 sandwich.extend([bread_type, protein_type, cheese_type, mayo, mustard, tomato, lettuce]) for item", "= 'How many sandwiches would you like?\\n' how_many = ip.inputInt(min=1,", "prompt=cheese_prompt) sandwich = [] cost = 0 sandwich.extend([bread_type, protein_type, cheese_type,", "no to a topping costs nothing } return food_dict[food_name] def", "''' import pyinputplus as ip def get_cost(food_name): '''gets the cost", "user input for sandwich preferences ''' import pyinputplus as ip", "import pyinputplus as ip def get_cost(food_name): '''gets the cost of", "mustard = ip.inputYesNo(prompt='Would you like mustard?\\n') tomato = ip.inputYesNo(prompt='Would you", "tomato?\\n') lettuce = ip.inputYesNo(prompt='Would you like lettuce?\\n') like_cheese = ip.inputYesNo(prompt='Do", "def sandwich_builder(): print('Enter your sandwich preferences below:\\n') bread_prompt = 'What", "like? (sourdough, rye, wheat, or white)\\n' bread_type = ip.inputChoice(['sourdough', 'rye',", "like? 
(cheddar, swiss, mozzarella)\\n' cheese_type = ip.inputChoice(['cheddar', 'swiss', 'mozzarella'], prompt=cheese_prompt)", "would you like? (sourdough, rye, wheat, or white)\\n' bread_type =", "sandwich: cost += get_cost(item) how_many_prompt = 'How many sandwiches would", "cost of items in sandwich_builder''' food_dict = { 'sourdough':1.75, 'rye':2.0,", "= ip.inputYesNo(prompt='Would you like tomato?\\n') lettuce = ip.inputYesNo(prompt='Would you like", "sandwich?\\n') if like_cheese is 'yes': cheese_prompt = 'What kind of", "ip.inputChoice(['cheddar', 'swiss', 'mozzarella'], prompt=cheese_prompt) sandwich = [] cost = 0", "sandwich preferences below:\\n') bread_prompt = 'What bread type would you", "get_cost(item) else: sandwich = [] cost = 0 sandwich.extend([bread_type, protein_type,", "# toppings return 'yes' in sandwich_builder(), so I made them", "cost = 0 sandwich.extend([bread_type, protein_type, cheese_type, mayo, mustard, tomato, lettuce])", "cost += get_cost(item) how_many_prompt = 'How many sandwiches would you", "saying no to a topping costs nothing } return food_dict[food_name]", "in sandwich: cost += get_cost(item) else: sandwich = [] cost", "pyinputplus to validate user input for sandwich preferences ''' import", "'yes': cheese_prompt = 'What kind of cheese would you like?", "cheese_type = ip.inputChoice(['cheddar', 'swiss', 'mozzarella'], prompt=cheese_prompt) sandwich = [] cost", "'wheat':1.50, 'white':1.25, 'chicken':2.0, 'turkey':1.50, 'ham':2.0, 'tofu':1.25, 'cheddar':2.0, 'swiss':2.5, 'mozzarella':2.5, 'yes':0.25,", "to validate user input for sandwich preferences ''' import pyinputplus", "(chicken, turkey, ham, or tofu)\\n' protein_type = ip.inputChoice(['chicken', 'turkey', 'ham',", "= [] cost = 0 sandwich.extend([bread_type, protein_type, mayo, mustard, tomato,", "'chicken':2.0, 'turkey':1.50, 'ham':2.0, 'tofu':1.25, 'cheddar':2.0, 'swiss':2.5, 'mozzarella':2.5, 'yes':0.25, # toppings", "'yes' in sandwich_builder(), so I made them all cost 0.25", "would you like? (cheddar, swiss, mozzarella)\\n' cheese_type = ip.inputChoice(['cheddar', 'swiss',", "= 'What bread type would you like? (sourdough, rye, wheat,", "so I made them all cost 0.25 'no':0 # saying", "'What type of protein would you like? (chicken, turkey, ham,", "you like mustard?\\n') tomato = ip.inputYesNo(prompt='Would you like tomato?\\n') lettuce", "made them all cost 0.25 'no':0 # saying no to", "lettuce]) for item in sandwich: cost += get_cost(item) else: sandwich", "would you like? (chicken, turkey, ham, or tofu)\\n' protein_type =", "like tomato?\\n') lettuce = ip.inputYesNo(prompt='Would you like lettuce?\\n') like_cheese =", "'''gets the cost of items in sandwich_builder''' food_dict = {", "food_dict = { 'sourdough':1.75, 'rye':2.0, 'wheat':1.50, 'white':1.25, 'chicken':2.0, 'turkey':1.50, 'ham':2.0,", "<NAME> 2/20/21 sandwich-maker.py uses pyinputplus to validate user input for", "'rye', 'wheat', 'white'], prompt=bread_prompt) protein_prompt = 'What type of protein", "tomato = ip.inputYesNo(prompt='Would you like tomato?\\n') lettuce = ip.inputYesNo(prompt='Would you", "all cost 0.25 'no':0 # saying no to a topping", "sandwich.extend([bread_type, protein_type, cheese_type, mayo, mustard, tomato, lettuce]) for item in", "= ip.inputChoice(['sourdough', 'rye', 'wheat', 'white'], prompt=bread_prompt) protein_prompt = 'What type", "you like?\\n' how_many = ip.inputInt(min=1, prompt=how_many_prompt) print('\\nFinal cost: ${}'.format(round(cost *", "type would you like? 
(sourdough, rye, wheat, or white)\\n' bread_type", "preferences ''' import pyinputplus as ip def get_cost(food_name): '''gets the", "'rye':2.0, 'wheat':1.50, 'white':1.25, 'chicken':2.0, 'turkey':1.50, 'ham':2.0, 'tofu':1.25, 'cheddar':2.0, 'swiss':2.5, 'mozzarella':2.5,", "for sandwich preferences ''' import pyinputplus as ip def get_cost(food_name):", "mustard, tomato, lettuce]) for item in sandwich: cost += get_cost(item)", "you like? (cheddar, swiss, mozzarella)\\n' cheese_type = ip.inputChoice(['cheddar', 'swiss', 'mozzarella'],", "you like cheese on your sandwich?\\n') if like_cheese is 'yes':", "'turkey':1.50, 'ham':2.0, 'tofu':1.25, 'cheddar':2.0, 'swiss':2.5, 'mozzarella':2.5, 'yes':0.25, # toppings return", "sandwiches would you like?\\n' how_many = ip.inputInt(min=1, prompt=how_many_prompt) print('\\nFinal cost:", "ip def get_cost(food_name): '''gets the cost of items in sandwich_builder'''", "'What kind of cheese would you like? (cheddar, swiss, mozzarella)\\n'", "'turkey', 'ham', 'tofu'], prompt=protein_prompt) mayo = ip.inputYesNo(prompt='Would you like mayo?\\n')", "= ip.inputChoice(['cheddar', 'swiss', 'mozzarella'], prompt=cheese_prompt) sandwich = [] cost =", "protein_type, cheese_type, mayo, mustard, tomato, lettuce]) for item in sandwich:", "sandwich_builder(), so I made them all cost 0.25 'no':0 #", "prompt=how_many_prompt) print('\\nFinal cost: ${}'.format(round(cost * how_many * 1.06, 2))) sandwich_builder()", "'ham', 'tofu'], prompt=protein_prompt) mayo = ip.inputYesNo(prompt='Would you like mayo?\\n') mustard", "you like lettuce?\\n') like_cheese = ip.inputYesNo(prompt='Do you like cheese on", "get_cost(item) how_many_prompt = 'How many sandwiches would you like?\\n' how_many", "print('Enter your sandwich preferences below:\\n') bread_prompt = 'What bread type", "of cheese would you like? (cheddar, swiss, mozzarella)\\n' cheese_type =", "ham, or tofu)\\n' protein_type = ip.inputChoice(['chicken', 'turkey', 'ham', 'tofu'], prompt=protein_prompt)", "cheese_type, mayo, mustard, tomato, lettuce]) for item in sandwich: cost", "+= get_cost(item) else: sandwich = [] cost = 0 sandwich.extend([bread_type,", "protein would you like? (chicken, turkey, ham, or tofu)\\n' protein_type", "for item in sandwich: cost += get_cost(item) else: sandwich =", "= [] cost = 0 sandwich.extend([bread_type, protein_type, cheese_type, mayo, mustard,", "bread_prompt = 'What bread type would you like? 
(sourdough, rye,", "would you like?\\n' how_many = ip.inputInt(min=1, prompt=how_many_prompt) print('\\nFinal cost: ${}'.format(round(cost", "return food_dict[food_name] def sandwich_builder(): print('Enter your sandwich preferences below:\\n') bread_prompt", "in sandwich: cost += get_cost(item) how_many_prompt = 'How many sandwiches", "how_many_prompt = 'How many sandwiches would you like?\\n' how_many =", "if like_cheese is 'yes': cheese_prompt = 'What kind of cheese", "'swiss':2.5, 'mozzarella':2.5, 'yes':0.25, # toppings return 'yes' in sandwich_builder(), so", "= ip.inputChoice(['chicken', 'turkey', 'ham', 'tofu'], prompt=protein_prompt) mayo = ip.inputYesNo(prompt='Would you", "get_cost(food_name): '''gets the cost of items in sandwich_builder''' food_dict =", "in sandwich_builder''' food_dict = { 'sourdough':1.75, 'rye':2.0, 'wheat':1.50, 'white':1.25, 'chicken':2.0,", "tomato, lettuce]) for item in sandwich: cost += get_cost(item) how_many_prompt", "return 'yes' in sandwich_builder(), so I made them all cost", "''' <NAME> 2/20/21 sandwich-maker.py uses pyinputplus to validate user input", "preferences below:\\n') bread_prompt = 'What bread type would you like?", "ip.inputInt(min=1, prompt=how_many_prompt) print('\\nFinal cost: ${}'.format(round(cost * how_many * 1.06, 2)))", "'mozzarella':2.5, 'yes':0.25, # toppings return 'yes' in sandwich_builder(), so I", "= ip.inputYesNo(prompt='Would you like mustard?\\n') tomato = ip.inputYesNo(prompt='Would you like", "else: sandwich = [] cost = 0 sandwich.extend([bread_type, protein_type, mayo,", "ip.inputYesNo(prompt='Would you like mustard?\\n') tomato = ip.inputYesNo(prompt='Would you like tomato?\\n')", "I made them all cost 0.25 'no':0 # saying no", "ip.inputYesNo(prompt='Would you like mayo?\\n') mustard = ip.inputYesNo(prompt='Would you like mustard?\\n')", "'tofu':1.25, 'cheddar':2.0, 'swiss':2.5, 'mozzarella':2.5, 'yes':0.25, # toppings return 'yes' in", "tofu)\\n' protein_type = ip.inputChoice(['chicken', 'turkey', 'ham', 'tofu'], prompt=protein_prompt) mayo =", "on your sandwich?\\n') if like_cheese is 'yes': cheese_prompt = 'What", "mozzarella)\\n' cheese_type = ip.inputChoice(['cheddar', 'swiss', 'mozzarella'], prompt=cheese_prompt) sandwich = []", "items in sandwich_builder''' food_dict = { 'sourdough':1.75, 'rye':2.0, 'wheat':1.50, 'white':1.25,", "a topping costs nothing } return food_dict[food_name] def sandwich_builder(): print('Enter", "you like? (chicken, turkey, ham, or tofu)\\n' protein_type = ip.inputChoice(['chicken',", "[] cost = 0 sandwich.extend([bread_type, protein_type, mayo, mustard, tomato, lettuce])", "sandwich = [] cost = 0 sandwich.extend([bread_type, protein_type, mayo, mustard,", "+= get_cost(item) how_many_prompt = 'How many sandwiches would you like?\\n'", "cost += get_cost(item) else: sandwich = [] cost = 0", "ip.inputYesNo(prompt='Do you like cheese on your sandwich?\\n') if like_cheese is", "= 0 sandwich.extend([bread_type, protein_type, cheese_type, mayo, mustard, tomato, lettuce]) for", "protein_type, mayo, mustard, tomato, lettuce]) for item in sandwich: cost", "cheese would you like? 
(cheddar, swiss, mozzarella)\\n' cheese_type = ip.inputChoice(['cheddar',", "swiss, mozzarella)\\n' cheese_type = ip.inputChoice(['cheddar', 'swiss', 'mozzarella'], prompt=cheese_prompt) sandwich =", "sandwich_builder''' food_dict = { 'sourdough':1.75, 'rye':2.0, 'wheat':1.50, 'white':1.25, 'chicken':2.0, 'turkey':1.50,", "them all cost 0.25 'no':0 # saying no to a", "'ham':2.0, 'tofu':1.25, 'cheddar':2.0, 'swiss':2.5, 'mozzarella':2.5, 'yes':0.25, # toppings return 'yes'", "in sandwich_builder(), so I made them all cost 0.25 'no':0", "'no':0 # saying no to a topping costs nothing }", "or white)\\n' bread_type = ip.inputChoice(['sourdough', 'rye', 'wheat', 'white'], prompt=bread_prompt) protein_prompt", "white)\\n' bread_type = ip.inputChoice(['sourdough', 'rye', 'wheat', 'white'], prompt=bread_prompt) protein_prompt =", "your sandwich?\\n') if like_cheese is 'yes': cheese_prompt = 'What kind", "uses pyinputplus to validate user input for sandwich preferences '''", "'white'], prompt=bread_prompt) protein_prompt = 'What type of protein would you", "ip.inputChoice(['chicken', 'turkey', 'ham', 'tofu'], prompt=protein_prompt) mayo = ip.inputYesNo(prompt='Would you like", "turkey, ham, or tofu)\\n' protein_type = ip.inputChoice(['chicken', 'turkey', 'ham', 'tofu'],", "input for sandwich preferences ''' import pyinputplus as ip def", "lettuce = ip.inputYesNo(prompt='Would you like lettuce?\\n') like_cheese = ip.inputYesNo(prompt='Do you", "to a topping costs nothing } return food_dict[food_name] def sandwich_builder():", "kind of cheese would you like? (cheddar, swiss, mozzarella)\\n' cheese_type", "like?\\n' how_many = ip.inputInt(min=1, prompt=how_many_prompt) print('\\nFinal cost: ${}'.format(round(cost * how_many", "mustard?\\n') tomato = ip.inputYesNo(prompt='Would you like tomato?\\n') lettuce = ip.inputYesNo(prompt='Would", "as ip def get_cost(food_name): '''gets the cost of items in", "protein_prompt = 'What type of protein would you like? (chicken,", "or tofu)\\n' protein_type = ip.inputChoice(['chicken', 'turkey', 'ham', 'tofu'], prompt=protein_prompt) mayo", "mayo?\\n') mustard = ip.inputYesNo(prompt='Would you like mustard?\\n') tomato = ip.inputYesNo(prompt='Would", "sandwich.extend([bread_type, protein_type, mayo, mustard, tomato, lettuce]) for item in sandwich:", "'swiss', 'mozzarella'], prompt=cheese_prompt) sandwich = [] cost = 0 sandwich.extend([bread_type,", "item in sandwich: cost += get_cost(item) else: sandwich = []", "lettuce?\\n') like_cheese = ip.inputYesNo(prompt='Do you like cheese on your sandwich?\\n')", "= 0 sandwich.extend([bread_type, protein_type, mayo, mustard, tomato, lettuce]) for item", "many sandwiches would you like?\\n' how_many = ip.inputInt(min=1, prompt=how_many_prompt) print('\\nFinal", "(sourdough, rye, wheat, or white)\\n' bread_type = ip.inputChoice(['sourdough', 'rye', 'wheat',", "{ 'sourdough':1.75, 'rye':2.0, 'wheat':1.50, 'white':1.25, 'chicken':2.0, 'turkey':1.50, 'ham':2.0, 'tofu':1.25, 'cheddar':2.0,", "prompt=bread_prompt) protein_prompt = 'What type of protein would you like?", "is 'yes': cheese_prompt = 'What kind of cheese would you", "the cost of items in sandwich_builder''' food_dict = { 'sourdough':1.75,", "bread_type = ip.inputChoice(['sourdough', 'rye', 'wheat', 'white'], prompt=bread_prompt) protein_prompt = 'What", "[] cost = 0 sandwich.extend([bread_type, protein_type, cheese_type, mayo, mustard, tomato,", "type of protein would you like? 
(chicken, turkey, ham, or", "mayo, mustard, tomato, lettuce]) for item in sandwich: cost +=", "pyinputplus as ip def get_cost(food_name): '''gets the cost of items", "= { 'sourdough':1.75, 'rye':2.0, 'wheat':1.50, 'white':1.25, 'chicken':2.0, 'turkey':1.50, 'ham':2.0, 'tofu':1.25,", "food_dict[food_name] def sandwich_builder(): print('Enter your sandwich preferences below:\\n') bread_prompt =", "'tofu'], prompt=protein_prompt) mayo = ip.inputYesNo(prompt='Would you like mayo?\\n') mustard =", "'sourdough':1.75, 'rye':2.0, 'wheat':1.50, 'white':1.25, 'chicken':2.0, 'turkey':1.50, 'ham':2.0, 'tofu':1.25, 'cheddar':2.0, 'swiss':2.5,", "ip.inputYesNo(prompt='Would you like tomato?\\n') lettuce = ip.inputYesNo(prompt='Would you like lettuce?\\n')", "0 sandwich.extend([bread_type, protein_type, mayo, mustard, tomato, lettuce]) for item in", "ip.inputYesNo(prompt='Would you like lettuce?\\n') like_cheese = ip.inputYesNo(prompt='Do you like cheese", "bread type would you like? (sourdough, rye, wheat, or white)\\n'", "cheese_prompt = 'What kind of cheese would you like? (cheddar,", "like? (chicken, turkey, ham, or tofu)\\n' protein_type = ip.inputChoice(['chicken', 'turkey',", "sandwich preferences ''' import pyinputplus as ip def get_cost(food_name): '''gets", "below:\\n') bread_prompt = 'What bread type would you like? (sourdough,", "you like mayo?\\n') mustard = ip.inputYesNo(prompt='Would you like mustard?\\n') tomato", "how_many = ip.inputInt(min=1, prompt=how_many_prompt) print('\\nFinal cost: ${}'.format(round(cost * how_many *", "'How many sandwiches would you like?\\n' how_many = ip.inputInt(min=1, prompt=how_many_prompt)", "like mayo?\\n') mustard = ip.inputYesNo(prompt='Would you like mustard?\\n') tomato =", "rye, wheat, or white)\\n' bread_type = ip.inputChoice(['sourdough', 'rye', 'wheat', 'white'],", "def get_cost(food_name): '''gets the cost of items in sandwich_builder''' food_dict", "sandwich-maker.py uses pyinputplus to validate user input for sandwich preferences", "'wheat', 'white'], prompt=bread_prompt) protein_prompt = 'What type of protein would", "ip.inputChoice(['sourdough', 'rye', 'wheat', 'white'], prompt=bread_prompt) protein_prompt = 'What type of", "= ip.inputYesNo(prompt='Do you like cheese on your sandwich?\\n') if like_cheese", "sandwich: cost += get_cost(item) else: sandwich = [] cost =", "for item in sandwich: cost += get_cost(item) how_many_prompt = 'How", "cost = 0 sandwich.extend([bread_type, protein_type, mayo, mustard, tomato, lettuce]) for", "like lettuce?\\n') like_cheese = ip.inputYesNo(prompt='Do you like cheese on your", "like_cheese = ip.inputYesNo(prompt='Do you like cheese on your sandwich?\\n') if", "'What bread type would you like? (sourdough, rye, wheat, or", "'mozzarella'], prompt=cheese_prompt) sandwich = [] cost = 0 sandwich.extend([bread_type, protein_type,", "(cheddar, swiss, mozzarella)\\n' cheese_type = ip.inputChoice(['cheddar', 'swiss', 'mozzarella'], prompt=cheese_prompt) sandwich", "= 'What type of protein would you like? 
(chicken, turkey,", "= ip.inputYesNo(prompt='Would you like mayo?\\n') mustard = ip.inputYesNo(prompt='Would you like", "nothing } return food_dict[food_name] def sandwich_builder(): print('Enter your sandwich preferences", "= ip.inputInt(min=1, prompt=how_many_prompt) print('\\nFinal cost: ${}'.format(round(cost * how_many * 1.06,", "2/20/21 sandwich-maker.py uses pyinputplus to validate user input for sandwich", "lettuce]) for item in sandwich: cost += get_cost(item) how_many_prompt =", "like mustard?\\n') tomato = ip.inputYesNo(prompt='Would you like tomato?\\n') lettuce =" ]
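# Usage sketch (the entry-point guard and the direct call are assumptions added for
# illustration, not part of the original script): running the module interactively
# walks the user through the prompts above and prints the tax-inclusive total.
if __name__ == '__main__':
    sandwich_builder()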
[ "Chapter from builder.containers.episode import Episode from builder.containers.scene import Scene from", "SCmd.TAG_TITLE) self.assertEqual(tmp.script, expect) self.assertEqual(tmp.option, exp_opt) validate_with_fail(self, 'title_of', checker, data) def", "expect): tmp = hd.HeaderUpdater()._outline_of(src) self.assertIsInstance(tmp, SCode) self.assertEqual(tmp.cmd, SCmd.TAG_COMMENT) self.assertEqual(tmp.script, expect)", "] validate_with_fail(self, 'end_of', lambda src, expect: self.assertEqual( hd.HeaderUpdater()._end_of(src).cmd, expect), data)", "tests.testutils import print_testtitle, validate_with_fail from builder.commands.scode import SCode, SCmd from", "hd.HeaderUpdater()._title_of(src) self.assertIsInstance(tmp, SCode) self.assertEqual(tmp.cmd, SCmd.TAG_TITLE) self.assertEqual(tmp.script, expect) self.assertEqual(tmp.option, exp_opt) validate_with_fail(self,", "self.assertEqual(tmp.cmd, SCmd.TAG_TITLE) self.assertEqual(tmp.script, expect) self.assertEqual(tmp.option, exp_opt) validate_with_fail(self, 'title_of', checker, data)", "def checker(src, expect): tmp = hd.HeaderUpdater()._outline_of(src) self.assertIsInstance(tmp, SCode) self.assertEqual(tmp.cmd, SCmd.TAG_COMMENT)", "(True, Story('test',outline='apple'), ('apple',)), ] def checker(src, expect): tmp = hd.HeaderUpdater()._outline_of(src)", "test_outline_of(self): data = [ # (src, expect) (True, Story('test',outline='apple'), ('apple',)),", "self.assertEqual(tmp.script, expect) self.assertEqual(tmp.option, exp_opt) validate_with_fail(self, 'title_of', checker, data) def test_outline_of(self):", "def checker(src, expect, exp_opt): tmp = hd.HeaderUpdater()._title_of(src) self.assertIsInstance(tmp, SCode) self.assertEqual(tmp.cmd,", "import headerupdater as hd class HeaderUpdaterTest(unittest.TestCase): @classmethod def setUpClass(cls): print_testtitle(hd.__name__,", "'outline_of', checker, data) def test_end_of(self): data = [ # (src,", "# (src, expect) (True, Story('test',outline='apple'), ('apple',)), ] def checker(src, expect):", "= [ # (src, expect, exp_opt) (True, Story('test',), ('test',), 1),", "expect, exp_opt) (True, Story('test',), ('test',), 1), ] def checker(src, expect,", "''' HeaderUpdater class test ======================== ''' import unittest from tests.testutils", "class HeaderUpdaterTest(unittest.TestCase): @classmethod def setUpClass(cls): print_testtitle(hd.__name__, 'HeaderUpdater class') def test_instance(self):", "('test',), 1), ] def checker(src, expect, exp_opt): tmp = hd.HeaderUpdater()._title_of(src)", "exp_opt): tmp = hd.HeaderUpdater()._title_of(src) self.assertIsInstance(tmp, SCode) self.assertEqual(tmp.cmd, SCmd.TAG_TITLE) self.assertEqual(tmp.script, expect)", "= hd.HeaderUpdater()._title_of(src) self.assertIsInstance(tmp, SCode) self.assertEqual(tmp.cmd, SCmd.TAG_TITLE) self.assertEqual(tmp.script, expect) self.assertEqual(tmp.option, exp_opt)", "builder.containers.chapter import Chapter from builder.containers.episode import Episode from builder.containers.scene import", "checker(src, expect): tmp = hd.HeaderUpdater()._outline_of(src) self.assertIsInstance(tmp, SCode) self.assertEqual(tmp.cmd, SCmd.TAG_COMMENT) self.assertEqual(tmp.script,", "import Story from builder.core import headerupdater as hd class HeaderUpdaterTest(unittest.TestCase):", "builder.commands.scode import SCode, SCmd from builder.containers.chapter import Chapter from builder.containers.episode", "checker, data) def test_outline_of(self): data = [ # (src, expect)", "Episode from builder.containers.scene import Scene from 
builder.containers.story import Story from", "exp_opt) (True, Story('test',), ('test',), 1), ] def checker(src, expect, exp_opt):", "checker, data) def test_end_of(self): data = [ # (src, expect)", "] def checker(src, expect): tmp = hd.HeaderUpdater()._outline_of(src) self.assertIsInstance(tmp, SCode) self.assertEqual(tmp.cmd,", "= [ # (src, expect) (True, Chapter('test',), SCmd.END_CHAPTER), ] validate_with_fail(self,", "from builder.containers.story import Story from builder.core import headerupdater as hd", "builder.containers.scene import Scene from builder.containers.story import Story from builder.core import", "builder.core import headerupdater as hd class HeaderUpdaterTest(unittest.TestCase): @classmethod def setUpClass(cls):", "def test_title_of(self): data = [ # (src, expect, exp_opt) (True,", "-*- ''' HeaderUpdater class test ======================== ''' import unittest from", "validate_with_fail from builder.commands.scode import SCode, SCmd from builder.containers.chapter import Chapter", "import Chapter from builder.containers.episode import Episode from builder.containers.scene import Scene", "Story('test',), ('test',), 1), ] def checker(src, expect, exp_opt): tmp =", "setUpClass(cls): print_testtitle(hd.__name__, 'HeaderUpdater class') def test_instance(self): tmp = hd.HeaderUpdater() self.assertIsInstance(tmp,", "SCmd.TAG_COMMENT) self.assertEqual(tmp.script, expect) validate_with_fail(self, 'outline_of', checker, data) def test_end_of(self): data", "expect) self.assertEqual(tmp.option, exp_opt) validate_with_fail(self, 'title_of', checker, data) def test_outline_of(self): data", "(True, Story('test',), ('test',), 1), ] def checker(src, expect, exp_opt): tmp", "class') def test_instance(self): tmp = hd.HeaderUpdater() self.assertIsInstance(tmp, hd.HeaderUpdater) def test_title_of(self):", "import SCode, SCmd from builder.containers.chapter import Chapter from builder.containers.episode import", "test_title_of(self): data = [ # (src, expect, exp_opt) (True, Story('test',),", "data = [ # (src, expect, exp_opt) (True, Story('test',), ('test',),", "hd class HeaderUpdaterTest(unittest.TestCase): @classmethod def setUpClass(cls): print_testtitle(hd.__name__, 'HeaderUpdater class') def", "unittest from tests.testutils import print_testtitle, validate_with_fail from builder.commands.scode import SCode,", "SCode) self.assertEqual(tmp.cmd, SCmd.TAG_COMMENT) self.assertEqual(tmp.script, expect) validate_with_fail(self, 'outline_of', checker, data) def", "def test_outline_of(self): data = [ # (src, expect) (True, Story('test',outline='apple'),", "exp_opt) validate_with_fail(self, 'title_of', checker, data) def test_outline_of(self): data = [", "self.assertEqual(tmp.cmd, SCmd.TAG_COMMENT) self.assertEqual(tmp.script, expect) validate_with_fail(self, 'outline_of', checker, data) def test_end_of(self):", "HeaderUpdater class test ======================== ''' import unittest from tests.testutils import", "test ======================== ''' import unittest from tests.testutils import print_testtitle, validate_with_fail", "checker(src, expect, exp_opt): tmp = hd.HeaderUpdater()._title_of(src) self.assertIsInstance(tmp, SCode) self.assertEqual(tmp.cmd, SCmd.TAG_TITLE)", "hd.HeaderUpdater()._outline_of(src) self.assertIsInstance(tmp, SCode) self.assertEqual(tmp.cmd, SCmd.TAG_COMMENT) self.assertEqual(tmp.script, expect) validate_with_fail(self, 'outline_of', checker,", "import unittest from tests.testutils import print_testtitle, validate_with_fail from builder.commands.scode import", "(src, 
expect, exp_opt) (True, Story('test',), ('test',), 1), ] def checker(src,", "print_testtitle(hd.__name__, 'HeaderUpdater class') def test_instance(self): tmp = hd.HeaderUpdater() self.assertIsInstance(tmp, hd.HeaderUpdater)", "import Scene from builder.containers.story import Story from builder.core import headerupdater", "[ # (src, expect) (True, Story('test',outline='apple'), ('apple',)), ] def checker(src,", "test_instance(self): tmp = hd.HeaderUpdater() self.assertIsInstance(tmp, hd.HeaderUpdater) def test_title_of(self): data =", "('apple',)), ] def checker(src, expect): tmp = hd.HeaderUpdater()._outline_of(src) self.assertIsInstance(tmp, SCode)", "validate_with_fail(self, 'title_of', checker, data) def test_outline_of(self): data = [ #", "-*- coding: utf-8 -*- ''' HeaderUpdater class test ======================== '''", "coding: utf-8 -*- ''' HeaderUpdater class test ======================== ''' import", "def setUpClass(cls): print_testtitle(hd.__name__, 'HeaderUpdater class') def test_instance(self): tmp = hd.HeaderUpdater()", "(src, expect) (True, Story('test',outline='apple'), ('apple',)), ] def checker(src, expect): tmp", "expect) (True, Chapter('test',), SCmd.END_CHAPTER), ] validate_with_fail(self, 'end_of', lambda src, expect:", "'title_of', checker, data) def test_outline_of(self): data = [ # (src,", "class test ======================== ''' import unittest from tests.testutils import print_testtitle,", "headerupdater as hd class HeaderUpdaterTest(unittest.TestCase): @classmethod def setUpClass(cls): print_testtitle(hd.__name__, 'HeaderUpdater", "= hd.HeaderUpdater() self.assertIsInstance(tmp, hd.HeaderUpdater) def test_title_of(self): data = [ #", "[ # (src, expect, exp_opt) (True, Story('test',), ('test',), 1), ]", "validate_with_fail(self, 'outline_of', checker, data) def test_end_of(self): data = [ #", "from builder.containers.episode import Episode from builder.containers.scene import Scene from builder.containers.story", "SCmd from builder.containers.chapter import Chapter from builder.containers.episode import Episode from", "data) def test_outline_of(self): data = [ # (src, expect) (True,", "data) def test_end_of(self): data = [ # (src, expect) (True,", "as hd class HeaderUpdaterTest(unittest.TestCase): @classmethod def setUpClass(cls): print_testtitle(hd.__name__, 'HeaderUpdater class')", "''' import unittest from tests.testutils import print_testtitle, validate_with_fail from builder.commands.scode", "print_testtitle, validate_with_fail from builder.commands.scode import SCode, SCmd from builder.containers.chapter import", "from tests.testutils import print_testtitle, validate_with_fail from builder.commands.scode import SCode, SCmd", "SCode) self.assertEqual(tmp.cmd, SCmd.TAG_TITLE) self.assertEqual(tmp.script, expect) self.assertEqual(tmp.option, exp_opt) validate_with_fail(self, 'title_of', checker,", "hd.HeaderUpdater() self.assertIsInstance(tmp, hd.HeaderUpdater) def test_title_of(self): data = [ # (src,", "def test_end_of(self): data = [ # (src, expect) (True, Chapter('test',),", "import print_testtitle, validate_with_fail from builder.commands.scode import SCode, SCmd from builder.containers.chapter", "expect) validate_with_fail(self, 'outline_of', checker, data) def test_end_of(self): data = [", "HeaderUpdaterTest(unittest.TestCase): @classmethod def setUpClass(cls): print_testtitle(hd.__name__, 'HeaderUpdater class') def test_instance(self): tmp", "[ # (src, expect) (True, Chapter('test',), SCmd.END_CHAPTER), ] validate_with_fail(self, 'end_of',", 
"======================== ''' import unittest from tests.testutils import print_testtitle, validate_with_fail from", "from builder.core import headerupdater as hd class HeaderUpdaterTest(unittest.TestCase): @classmethod def", "self.assertIsInstance(tmp, hd.HeaderUpdater) def test_title_of(self): data = [ # (src, expect,", "self.assertIsInstance(tmp, SCode) self.assertEqual(tmp.cmd, SCmd.TAG_TITLE) self.assertEqual(tmp.script, expect) self.assertEqual(tmp.option, exp_opt) validate_with_fail(self, 'title_of',", "SCmd.END_CHAPTER), ] validate_with_fail(self, 'end_of', lambda src, expect: self.assertEqual( hd.HeaderUpdater()._end_of(src).cmd, expect),", "expect) (True, Story('test',outline='apple'), ('apple',)), ] def checker(src, expect): tmp =", "builder.containers.episode import Episode from builder.containers.scene import Scene from builder.containers.story import", "# (src, expect, exp_opt) (True, Story('test',), ('test',), 1), ] def", "expect, exp_opt): tmp = hd.HeaderUpdater()._title_of(src) self.assertIsInstance(tmp, SCode) self.assertEqual(tmp.cmd, SCmd.TAG_TITLE) self.assertEqual(tmp.script,", "import Episode from builder.containers.scene import Scene from builder.containers.story import Story", "Story('test',outline='apple'), ('apple',)), ] def checker(src, expect): tmp = hd.HeaderUpdater()._outline_of(src) self.assertIsInstance(tmp,", "from builder.containers.scene import Scene from builder.containers.story import Story from builder.core", "data = [ # (src, expect) (True, Chapter('test',), SCmd.END_CHAPTER), ]", "def test_instance(self): tmp = hd.HeaderUpdater() self.assertIsInstance(tmp, hd.HeaderUpdater) def test_title_of(self): data", "from builder.containers.chapter import Chapter from builder.containers.episode import Episode from builder.containers.scene", "hd.HeaderUpdater) def test_title_of(self): data = [ # (src, expect, exp_opt)", "# (src, expect) (True, Chapter('test',), SCmd.END_CHAPTER), ] validate_with_fail(self, 'end_of', lambda", "self.assertEqual(tmp.script, expect) validate_with_fail(self, 'outline_of', checker, data) def test_end_of(self): data =", "Chapter('test',), SCmd.END_CHAPTER), ] validate_with_fail(self, 'end_of', lambda src, expect: self.assertEqual( hd.HeaderUpdater()._end_of(src).cmd,", "data = [ # (src, expect) (True, Story('test',outline='apple'), ('apple',)), ]", "tmp = hd.HeaderUpdater()._outline_of(src) self.assertIsInstance(tmp, SCode) self.assertEqual(tmp.cmd, SCmd.TAG_COMMENT) self.assertEqual(tmp.script, expect) validate_with_fail(self,", "] def checker(src, expect, exp_opt): tmp = hd.HeaderUpdater()._title_of(src) self.assertIsInstance(tmp, SCode)", "tmp = hd.HeaderUpdater()._title_of(src) self.assertIsInstance(tmp, SCode) self.assertEqual(tmp.cmd, SCmd.TAG_TITLE) self.assertEqual(tmp.script, expect) self.assertEqual(tmp.option,", "@classmethod def setUpClass(cls): print_testtitle(hd.__name__, 'HeaderUpdater class') def test_instance(self): tmp =", "SCode, SCmd from builder.containers.chapter import Chapter from builder.containers.episode import Episode", "utf-8 -*- ''' HeaderUpdater class test ======================== ''' import unittest", "# -*- coding: utf-8 -*- ''' HeaderUpdater class test ========================", "<filename>tests/core/test_headerupdater.py # -*- coding: utf-8 -*- ''' HeaderUpdater class test", "Story from builder.core import headerupdater as hd class HeaderUpdaterTest(unittest.TestCase): @classmethod", "'HeaderUpdater class') def test_instance(self): tmp = hd.HeaderUpdater() self.assertIsInstance(tmp, hd.HeaderUpdater) 
def", "self.assertIsInstance(tmp, SCode) self.assertEqual(tmp.cmd, SCmd.TAG_COMMENT) self.assertEqual(tmp.script, expect) validate_with_fail(self, 'outline_of', checker, data)", "1), ] def checker(src, expect, exp_opt): tmp = hd.HeaderUpdater()._title_of(src) self.assertIsInstance(tmp,", "(True, Chapter('test',), SCmd.END_CHAPTER), ] validate_with_fail(self, 'end_of', lambda src, expect: self.assertEqual(", "(src, expect) (True, Chapter('test',), SCmd.END_CHAPTER), ] validate_with_fail(self, 'end_of', lambda src,", "= hd.HeaderUpdater()._outline_of(src) self.assertIsInstance(tmp, SCode) self.assertEqual(tmp.cmd, SCmd.TAG_COMMENT) self.assertEqual(tmp.script, expect) validate_with_fail(self, 'outline_of',", "self.assertEqual(tmp.option, exp_opt) validate_with_fail(self, 'title_of', checker, data) def test_outline_of(self): data =", "from builder.commands.scode import SCode, SCmd from builder.containers.chapter import Chapter from", "= [ # (src, expect) (True, Story('test',outline='apple'), ('apple',)), ] def", "test_end_of(self): data = [ # (src, expect) (True, Chapter('test',), SCmd.END_CHAPTER),", "Scene from builder.containers.story import Story from builder.core import headerupdater as", "tmp = hd.HeaderUpdater() self.assertIsInstance(tmp, hd.HeaderUpdater) def test_title_of(self): data = [", "builder.containers.story import Story from builder.core import headerupdater as hd class" ]
[ "np.False_]), np.array([np.True_, np.True_, np.False_])] f = h5py.File(filename, 'x') # create", "rows[r] f.flush() f.close() f = h5py.File(filename, 'r') dsetr = f[\"vlen_matrix\"]", "np.False_])] f = h5py.File(filename, 'x') # create file, fails if", "= [np.array([np.True_, np.False_]), np.array([np.True_, np.True_, np.False_])] f = h5py.File(filename, 'x')", "compression=\"gzip\", compression_opts=9, fletcher32=True, dtype=vlen_data_type) for r in range(len(rows)): dset[r] =", "in range(len(rows)): dset[r] = rows[r] f.flush() f.close() f = h5py.File(filename,", "fails if exists vlen_data_type = h5py.special_dtype(vlen=np.bool_) dset = f.create_dataset(\"vlen_matrix\", (2,),", "fletcher32=True, dtype=vlen_data_type) for r in range(len(rows)): dset[r] = rows[r] f.flush()", "if exists vlen_data_type = h5py.special_dtype(vlen=np.bool_) dset = f.create_dataset(\"vlen_matrix\", (2,), compression=\"gzip\",", "rows = [np.array([np.True_, np.False_]), np.array([np.True_, np.True_, np.False_])] f = h5py.File(filename,", "compression_opts=9, fletcher32=True, dtype=vlen_data_type) for r in range(len(rows)): dset[r] = rows[r]", "= \"test_vlen_datasets_np_bool.h5\" rows = [np.array([np.True_, np.False_]), np.array([np.True_, np.True_, np.False_])] f", "file, fails if exists vlen_data_type = h5py.special_dtype(vlen=np.bool_) dset = f.create_dataset(\"vlen_matrix\",", "f.create_dataset(\"vlen_matrix\", (2,), compression=\"gzip\", compression_opts=9, fletcher32=True, dtype=vlen_data_type) for r in range(len(rows)):", "dset[r] = rows[r] f.flush() f.close() f = h5py.File(filename, 'r') dsetr", "import numpy as np import h5py filename = \"test_vlen_datasets_np_bool.h5\" rows", "h5py.special_dtype(vlen=np.bool_) dset = f.create_dataset(\"vlen_matrix\", (2,), compression=\"gzip\", compression_opts=9, fletcher32=True, dtype=vlen_data_type) for", "f.close() f = h5py.File(filename, 'r') dsetr = f[\"vlen_matrix\"] for r", "numpy as np import h5py filename = \"test_vlen_datasets_np_bool.h5\" rows =", "for r in range(len(rows)): dset[r] = rows[r] f.flush() f.close() f", "'x') # create file, fails if exists vlen_data_type = h5py.special_dtype(vlen=np.bool_)", "\"test_vlen_datasets_np_bool.h5\" rows = [np.array([np.True_, np.False_]), np.array([np.True_, np.True_, np.False_])] f =", "np import h5py filename = \"test_vlen_datasets_np_bool.h5\" rows = [np.array([np.True_, np.False_]),", "import h5py filename = \"test_vlen_datasets_np_bool.h5\" rows = [np.array([np.True_, np.False_]), np.array([np.True_,", "= h5py.File(filename, 'r') dsetr = f[\"vlen_matrix\"] for r in range(dsetr.shape[0]):", "h5py.File(filename, 'r') dsetr = f[\"vlen_matrix\"] for r in range(dsetr.shape[0]): print(dsetr[r])", "exists vlen_data_type = h5py.special_dtype(vlen=np.bool_) dset = f.create_dataset(\"vlen_matrix\", (2,), compression=\"gzip\", compression_opts=9,", "[np.array([np.True_, np.False_]), np.array([np.True_, np.True_, np.False_])] f = h5py.File(filename, 'x') #", "filename = \"test_vlen_datasets_np_bool.h5\" rows = [np.array([np.True_, np.False_]), np.array([np.True_, np.True_, np.False_])]", "= rows[r] f.flush() f.close() f = h5py.File(filename, 'r') dsetr =", "f = h5py.File(filename, 'r') dsetr = f[\"vlen_matrix\"] for r in", "create file, fails if exists vlen_data_type = h5py.special_dtype(vlen=np.bool_) dset =", "r in range(len(rows)): dset[r] = rows[r] f.flush() f.close() f =", "np.array([np.True_, np.True_, np.False_])] f = h5py.File(filename, 'x') # create file,", "h5py filename = \"test_vlen_datasets_np_bool.h5\" rows = 
[np.array([np.True_, np.False_]), np.array([np.True_, np.True_,", "vlen_data_type = h5py.special_dtype(vlen=np.bool_) dset = f.create_dataset(\"vlen_matrix\", (2,), compression=\"gzip\", compression_opts=9, fletcher32=True,", "f.flush() f.close() f = h5py.File(filename, 'r') dsetr = f[\"vlen_matrix\"] for", "<gh_stars>1-10 import numpy as np import h5py filename = \"test_vlen_datasets_np_bool.h5\"", "dset = f.create_dataset(\"vlen_matrix\", (2,), compression=\"gzip\", compression_opts=9, fletcher32=True, dtype=vlen_data_type) for r", "= f.create_dataset(\"vlen_matrix\", (2,), compression=\"gzip\", compression_opts=9, fletcher32=True, dtype=vlen_data_type) for r in", "# create file, fails if exists vlen_data_type = h5py.special_dtype(vlen=np.bool_) dset", "as np import h5py filename = \"test_vlen_datasets_np_bool.h5\" rows = [np.array([np.True_,", "range(len(rows)): dset[r] = rows[r] f.flush() f.close() f = h5py.File(filename, 'r')", "= h5py.special_dtype(vlen=np.bool_) dset = f.create_dataset(\"vlen_matrix\", (2,), compression=\"gzip\", compression_opts=9, fletcher32=True, dtype=vlen_data_type)", "(2,), compression=\"gzip\", compression_opts=9, fletcher32=True, dtype=vlen_data_type) for r in range(len(rows)): dset[r]", "f = h5py.File(filename, 'x') # create file, fails if exists", "h5py.File(filename, 'x') # create file, fails if exists vlen_data_type =", "np.True_, np.False_])] f = h5py.File(filename, 'x') # create file, fails", "dtype=vlen_data_type) for r in range(len(rows)): dset[r] = rows[r] f.flush() f.close()", "= h5py.File(filename, 'x') # create file, fails if exists vlen_data_type" ]
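# Read-back sketch (assumption, not in the original script): iterating the open
# variable-length dataset yields one independent 1-D np.bool_ array per row, each
# with its own length, which is the point of the vlen dtype used above.
rows_back = [np.asarray(row) for row in dsetr]
print([r.shape for r in rows_back])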
[ "imVideo[vid].shape[2] # display video blnLoop = True fid = 0", "= (fid + 1) % maxFrames else: if k ==", "= cv2.resize(imDisp.copy(), None, fx=resizeAmount, fy=resizeAmount) imDisp = cv2.cvtColor(imDisp, cv2.COLOR_GRAY2RGB) if", "fmt metadata['FORMAT'] = fmt vmode = vidseq.get(cv2.CAP_PROP_MODE) if vmode >", "strWindowName[vid], '%d' % (curVideoFid + 1)) # look for \"esc\"", "1) % maxFrames elif k == 83: # right arrow", "have it in RGBA mode buf = np.roll(buf, 3, axis=2)", "= [] fid = 0 prevPercent = 0 print '\\n'", "writer = cv2.VideoWriter(filename, fourcc, fps=fps, frameSize=(width, height), isColor=isColor) print writer.isOpened()", "frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) if resizeAmount: frame = scipy.misc.imresize(frame, resizeAmount)", "imDisp = cv2.resize(imDisp.copy(), None, fx=resizeAmount, fy=resizeAmount) imDisp = cv2.cvtColor(imDisp, cv2.COLOR_GRAY2RGB)", "PIL from PIL import ImageDraw import angles import cv2 import", "range(len(imVideo)): if imVideo[vid].shape[-1] > maxFrames: maxFrames = imVideo[vid].shape[2] # display", "for vid in range(len(imVideo)): cv2.destroyWindow(strWindowName[vid]) def normalizeArray(a, bounds=None): if bounds", "curPercent > prevPercent: prevPercent = curPercent print '%.2d%%' % curPercent,", "(fid + 1) % maxFrames for vid in range(len(imVideo)): cv2.destroyWindow(strWindowName[vid])", "2), color=255): imInputPIL = PIL.Image.fromarray(imInput) d = ImageDraw.Draw(imInputPIL) d.text(loc, strText,", "fid += 1 curPercent = np.floor(100.0 * fid / numFrames)", "requested if resizeAmount: imCur = scipy.misc.imresize(imCur, resizeAmount) # show image", "if resizeAmount: imCur = scipy.misc.imresize(imCur, resizeAmount) # show image cvShowImage(imCur,", "if blnLoop: if k == 27: break elif k ==", "cv2.FONT_HERSHEY_PLAIN, 2.0, textColor, thickness=2) cv2.imshow(strName, imDisp) def mplotShowImage(imInput): plt.imshow(imInput, cmap=plt.cm.gray)", "= 0 while True: for vid in range(len(imVideo)): curVideoFid =", "import scipy.ndimage.filters import matplotlib.pyplot as plt import PIL from PIL", "plt.xticks(()) plt.yticks(()) def normalizeArray(a): return np.single(0.0 + a - a.min())", "**kwargs) def SaveImageToDisk(im, saveDir, fileName, saveext=('.png',)): for ext in saveext:", "= fmt vmode = vidseq.get(cv2.CAP_PROP_MODE) if vmode > 0: print", "is not None: imDisp = cv2.resize(imDisp.copy(), None, fx=resizeAmount, fy=resizeAmount) if", "> bounds[1]] = bounds[1] return b def loadVideoFromFile(dataFilePath, sigmaSmooth=None, resizeAmount=None):", "(a.max() - a.min()) else: b = (0.0 + a -", "', frameHeight metadata['FRAME_HEIGHT'] = frameHeight frameWidth = vidseq.get(cv2.CAP_PROP_FRAME_WIDTH) if frameWidth", "fid = (fid + 1) % maxFrames for vid in", "the ALPHA channel to have it in RGBA mode buf", "k == 81: # left arrow fid = (fid -", "convertFromRFtoBMode(imInputRF): return np.abs(scipy.signal.hilbert(imInputRF, axis=0)) def normalizeAngles(angleList, angle_range): return np.array( [angles.normalize(i,", "k == 27: break elif k == ord(' '): blnLoop", "if vmode > 0: print '\\MODE = ', vmode metadata['MODE']", "generateGatedVideoUsingSplineInterp(imInput, numOutFrames, minFrame, maxFrame, splineOrder): tZoom = np.float(numOutFrames) / (maxFrame", "cmap=plt.cm.gray) plt.grid(False) plt.xticks(()) plt.yticks(()) def normalizeArray(a): return np.single(0.0 + a", "'%.2d%%' % curPercent, print '\\n' imInput = np.dstack(imInput) vidseq.release() return", "20), cv2.FONT_HERSHEY_PLAIN, 2.0, textColor, thickness=2) cv2.imshow(strName, imDisp) def 
mplotShowImage(imInput): plt.imshow(imInput,", "for vid in range(len(imVideo)): if imVideo[vid].shape[-1] > maxFrames: maxFrames =", "+ 1) % maxFrames for vid in range(len(imVideo)): cv2.destroyWindow(strWindowName[vid]) def", "imCur = imVideo[vid][:, :, curVideoFid] # resize image if requested", "= {} numFrames = vidseq.get(cv2.CAP_PROP_FRAME_COUNT) print '\\tFRAME_COUNT = ', numFrames", "for fid in range(imVideo.shape[2]): plt.imsave(strFilePrefix + '.%.3d.tif' % (fid +", "def vis_checkerboard(im1, im2): im_chk = sitk.CheckerBoard(sitk.GetImageFromArray(im1), sitk.GetImageFromArray(im2)) return sitk.GetArrayFromImage(im_chk) def", "% (curVideoFid + 1)) # look for \"esc\" key k", "= ', fps metadata['FPS'] = fps fmt = vidseq.get(cv2.CAP_PROP_FORMAT) if", "> 0: print '\\tFRAME WIDTH = ', frameWidth metadata['FRAME_WIDTH'] =", "dtype=np.uint8) buf.shape = (w, h, 4) # canvas.tostring_argb give pixmap", "cv2 import SimpleITK as sitk def cvShowImage(imDisp, strName, strAnnotation='', textColor=(0,", "', numFrames metadata['FRAME_COUNT'] = numFrames frameHeight = vidseq.get(cv2.CAP_PROP_FRAME_HEIGHT) if frameHeight", "cv2.COLOR_GRAY2RGB) if len(strAnnotation) > 0: cv2.putText(imDisp, strAnnotation, (10, 20), cv2.FONT_HERSHEY_PLAIN,", "curVideoFid] # resize image if requested if resizeAmount: imCur =", "- bounds[0]) / (bounds[1] - bounds[0]) b[b < 0] =", "0: print '\\tFRAME HEIGHT = ', frameHeight metadata['FRAME_HEIGHT'] = frameHeight", "numpy 3D array of RGBA values \"\"\" # draw the", "(maxFrame - minFrame + 1) return scipy.ndimage.interpolation.zoom( imInput[:, :, minFrame:maxFrame", "vis_checkerboard(im1, im2): im_chk = sitk.CheckerBoard(sitk.GetImageFromArray(im1), sitk.GetImageFromArray(im2)) return sitk.GetArrayFromImage(im_chk) def fig2data(fig):", "vmode > 0: print '\\MODE = ', vmode metadata['MODE'] =", "start timer tStart = time.time() # write video # fourcc", "# opencv 2.4 fourcc = cv2.VideoWriter_fourcc(*list(codec)) height, width = imVideo.shape[:2]", "plt.xlabel(xlabel) if ylabel: plt.ylabel(ylabel) def convertFromRFtoBMode(imInputRF): return np.abs(scipy.signal.hilbert(imInputRF, axis=0)) def", "= 4 * sigmaSmooth + 1 print metadata # read", "ylabel: plt.ylabel(ylabel) def convertFromRFtoBMode(imInputRF): return np.abs(scipy.signal.hilbert(imInputRF, axis=0)) def normalizeAngles(angleList, angle_range):", "left arrow fid = (fid - 1) % maxFrames elif", "- 1) % maxFrames elif k == 83: # right", "curPercent = np.floor(100.0 * fid / numFrames) if curPercent >", "angles import cv2 import SimpleITK as sitk def cvShowImage(imDisp, strName,", "import cv2 import SimpleITK as sitk def cvShowImage(imDisp, strName, strAnnotation='',", "strAnnotation='', textColor=(0, 0, 255), resizeAmount=None): if resizeAmount is not None:", "fps = vidseq.get(cv2.CAP_PROP_FPS) if fps > 0: print '\\tFPS =", "= np.dstack(imInput) vidseq.release() return (imInput, metadata) def writeVideoToFile(imVideo, filename, codec='DIVX',", "# end timer tEnd = time.time() print 'Writing video {}", "with RGBA channels and return it @param fig a matplotlib", "'\\MODE = ', vmode metadata['MODE'] = MODE # smooth if", "return b def loadVideoFromFile(dataFilePath, sigmaSmooth=None, resizeAmount=None): vidseq = cv2.VideoCapture(dataFilePath) print", "metadata['FRAME_HEIGHT'] = frameHeight frameWidth = vidseq.get(cv2.CAP_PROP_FRAME_WIDTH) if frameWidth > 0:", "bounds=None): if bounds is None: return (0.0 + a -", "ext in saveext: plt.imsave(os.path.join(saveDir, fileName + ext), im) def 
generateGatedVideoUsingSplineInterp(imInput,", "if len(strAnnotation) > 0: cv2.putText(imDisp, strAnnotation, (10, 20), cv2.FONT_HERSHEY_PLAIN, 2.0,", "to have it in RGBA mode buf = np.roll(buf, 3,", "= cv2.FOURCC(*list(codec)) # opencv 2.4 fourcc = cv2.VideoWriter_fourcc(*list(codec)) height, width", "print '\\tFRAME WIDTH = ', frameWidth metadata['FRAME_WIDTH'] = frameWidth fps", "1], (1, 1, tZoom), order=splineOrder) def ncorr(imA, imB): imA =", "def cvShowVideo(imVideo, strWindowName, waitTime=30, resizeAmount=None): if not isinstance(imVideo, list): imVideo", "False else: fid = (fid + 1) % maxFrames else:", "a matplotlib figure @return a numpy 3D array of RGBA", "imCur = scipy.misc.imresize(imCur, resizeAmount) # show image cvShowImage(imCur, strWindowName[vid], '%d'", "metadata['FRAME_WIDTH'] = frameWidth fps = vidseq.get(cv2.CAP_PROP_FPS) if fps > 0:", "imB): imA = (imA - imA.mean()) / imA.std() imB =", "sigmaSmooth=None, resizeAmount=None): vidseq = cv2.VideoCapture(dataFilePath) print vidseq, vidseq.isOpened() # print", "= np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8) buf.shape = (w, h, 4) # canvas.tostring_argb", "% (fid + 1), imVideo[:, :, fid]) # end timer", "= (fid - 1) % maxFrames elif k == 83:", "> 0: print '\\tFPS = ', fps metadata['FPS'] = fps", ":, i], strText, loc) return imVideoOut def cvShowVideo(imVideo, strWindowName, waitTime=30,", "fig2data(fig): \"\"\" @brief Convert a Matplotlib figure to a 4D", "@brief Convert a Matplotlib figure to a 4D numpy array", "loc=(2, 2), color=255): imInputPIL = PIL.Image.fromarray(imInput) d = ImageDraw.Draw(imInputPIL) d.text(loc,", "cv2.imshow(strName, imDisp) def cvShowColorImage(imDisp, strName, strAnnotation='', textColor=(0, 0, 255), resizeAmount=None):", "def ncorr(imA, imB): imA = (imA - imA.mean()) / imA.std()", "i] = AddTextOnImage(imVideo[:, :, i], strText, loc) return imVideoOut def", "a Matplotlib figure to a 4D numpy array with RGBA", "elif k == 81: # left arrow fid = (fid", "'): # space blnLoop = True elif k == 81:", "isinstance(imVideo, list): imVideo = [imVideo] strWindowName = [strWindowName] # find", "(imB - imB.mean()) / imB.std() return np.mean(imA * imB) def", "seconds'.format(filename, tEnd - tStart) # release writer.release() def writeVideoAsTiffStack(imVideo, strFilePrefix):", "video blnLoop = True fid = 0 while True: for", "= [imVideo] strWindowName = [strWindowName] # find max number of", "maxFrames elif k == 83: # right arrow fid =", "= ', numFrames metadata['FRAME_COUNT'] = numFrames frameHeight = vidseq.get(cv2.CAP_PROP_FRAME_HEIGHT) if", "matplotlib.pyplot as plt import PIL from PIL import ImageDraw import", "wSmooth = 4 * sigmaSmooth + 1 print metadata #", "curVideoFid = fid % imVideo[vid].shape[2] imCur = imVideo[vid][:, :, curVideoFid]", "vidseq.get(cv2.CAP_PROP_MODE) if vmode > 0: print '\\MODE = ', vmode", "time.time() for fid in range(imVideo.shape[2]): plt.imsave(strFilePrefix + '.%.3d.tif' % (fid", "imA.std() imB = (imB - imB.mean()) / imB.std() return np.mean(imA", "plt.imshow(imInput, cmap=plt.cm.gray) plt.grid(False) plt.xticks(()) plt.yticks(()) def normalizeArray(a): return np.single(0.0 +", "# release writer.release() def writeVideoAsTiffStack(imVideo, strFilePrefix): # start timer tStart", "255), resizeAmount=None): if resizeAmount is not None: imDisp = cv2.resize(imDisp.copy(),", "vid in range(len(imVideo)): cv2.destroyWindow(strWindowName[vid]) def normalizeArray(a, bounds=None): if bounds is", "sitk.CheckerBoard(sitk.GetImageFromArray(im1), sitk.GetImageFromArray(im2)) 
return sitk.GetArrayFromImage(im_chk) def fig2data(fig): \"\"\" @brief Convert a", "saveDir, fileName, saveext=('.png',)): for ext in saveext: plt.imsave(os.path.join(saveDir, fileName +", "k == ord(' '): # space blnLoop = True elif", "arrow fid = (fid - 1) % maxFrames elif k", "fid = 0 while True: for vid in range(len(imVideo)): curVideoFid", "a - a.min()) / (a.max() - a.min()) def AddTextOnImage(imInput, strText,", "in range(len(imVideo)): if imVideo[vid].shape[-1] > maxFrames: maxFrames = imVideo[vid].shape[2] #", "print '\\FORMAT = ', fmt metadata['FORMAT'] = fmt vmode =", "a.min()) def AddTextOnImage(imInput, strText, loc=(2, 2), color=255): imInputPIL = PIL.Image.fromarray(imInput)", "vmode = vidseq.get(cv2.CAP_PROP_MODE) if vmode > 0: print '\\MODE =", "frames maxFrames = 0 for vid in range(len(imVideo)): if imVideo[vid].shape[-1]", "imVideo = [imVideo] strWindowName = [strWindowName] # find max number", "imA = (imA - imA.mean()) / imA.std() imB = (imB", "figure to a 4D numpy array with RGBA channels and", "numFrames metadata['FRAME_COUNT'] = numFrames frameHeight = vidseq.get(cv2.CAP_PROP_FRAME_HEIGHT) if frameHeight >", "strText, loc) return imVideoOut def cvShowVideo(imVideo, strWindowName, waitTime=30, resizeAmount=None): if", "isColor=isColor) print writer.isOpened() numFrames = imVideo.shape[-1] for fid in range(numFrames):", "numpy array with RGBA channels and return it @param fig", "ext), **kwargs) def SaveImageToDisk(im, saveDir, fileName, saveext=('.png',)): for ext in", "= vidseq.get(cv2.CAP_PROP_FRAME_HEIGHT) if frameHeight > 0: print '\\tFRAME HEIGHT =", "break elif k == ord(' '): blnLoop = False else:", "fmt = vidseq.get(cv2.CAP_PROP_FORMAT) if fmt > 0: print '\\FORMAT =", "True fid = 0 while True: for vid in range(len(imVideo)):", "cv2.resize(imDisp.copy(), None, fx=resizeAmount, fy=resizeAmount) imDisp = cv2.cvtColor(imDisp, cv2.COLOR_GRAY2RGB) if len(strAnnotation)", "# update progress fid += 1 curPercent = np.floor(100.0 *", "valid_object, frame = vidseq.read() if not valid_object: break frame =", "81: # left arrow fid = (fid - 1) %", "figure @return a numpy 3D array of RGBA values \"\"\"", "of RGBA values \"\"\" # draw the renderer fig.canvas.draw() #", "seconds'.format(strFilePrefix, tEnd - tStart) def mplotShowMIP(im, axis, xlabel=None, ylabel=None, title=None):", "angleList]) def SaveFigToDisk(saveDir, fileName, saveext=('.png', '.eps'), **kwargs): for ext in", "fx=resizeAmount, fy=resizeAmount) imDisp = cv2.cvtColor(imDisp, cv2.COLOR_GRAY2RGB) if len(strAnnotation) > 0:", "axis, xlabel=None, ylabel=None, title=None): plt.imshow(im.max(axis)) if title: plt.title(title) if xlabel:", "ord(' '): # space blnLoop = True elif k ==", "(0.0 + a - a.min()) / (a.max() - a.min()) else:", "0) imInput.append(frame) # update progress fid += 1 curPercent =", "valid_object: break frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) if resizeAmount: frame =", "1) return scipy.ndimage.interpolation.zoom( imInput[:, :, minFrame:maxFrame + 1], (1, 1,", "(fid - 1) % maxFrames elif k == 83: #", "arrow fid = (fid + 1) % maxFrames for vid", "key k = cv2.waitKey(waitTime) & 0xff if blnLoop: if k", "= ', frameWidth metadata['FRAME_WIDTH'] = frameWidth fps = vidseq.get(cv2.CAP_PROP_FPS) if", "writer.isOpened() numFrames = imVideo.shape[-1] for fid in range(numFrames): if isColor:", "b = (0.0 + a - bounds[0]) / (bounds[1] -", "frames imInput = [] fid = 0 prevPercent = 0", "# draw the renderer fig.canvas.draw() # Get the RGBA buffer", "numFrames) if curPercent > prevPercent: prevPercent = 
curPercent print '%.2d%%'", "np.array( [angles.normalize(i, angle_range[0], angle_range[1]) for i in angleList]) def SaveFigToDisk(saveDir,", "fileName + ext), im) def generateGatedVideoUsingSplineInterp(imInput, numOutFrames, minFrame, maxFrame, splineOrder):", "= (fid + 1) % maxFrames for vid in range(len(imVideo)):", "% maxFrames for vid in range(len(imVideo)): cv2.destroyWindow(strWindowName[vid]) def normalizeArray(a, bounds=None):", "{} seconds'.format(strFilePrefix, tEnd - tStart) def mplotShowMIP(im, axis, xlabel=None, ylabel=None,", "imVideo[vid].shape[2] imCur = imVideo[vid][:, :, curVideoFid] # resize image if", "minFrame + 1) return scipy.ndimage.interpolation.zoom( imInput[:, :, minFrame:maxFrame + 1],", "cv2.cvtColor(imDisp, cv2.COLOR_GRAY2RGB) if len(strAnnotation) > 0: cv2.putText(imDisp, strAnnotation, (10, 20),", "plt.ylabel(ylabel) def convertFromRFtoBMode(imInputRF): return np.abs(scipy.signal.hilbert(imInputRF, axis=0)) def normalizeAngles(angleList, angle_range): return", "== 83: # right arrow fid = (fid + 1)", "blnLoop = False else: fid = (fid + 1) %", "cv2.putText(imDisp, strAnnotation, (10, 20), cv2.FONT_HERSHEY_PLAIN, 2.0, textColor, thickness=2) cv2.imshow(strName, imDisp)", "in angleList]) def SaveFigToDisk(saveDir, fileName, saveext=('.png', '.eps'), **kwargs): for ext", "wanted if sigmaSmooth: wSmooth = 4 * sigmaSmooth + 1", "scipy.misc.imresize(frame, resizeAmount) if sigmaSmooth: frame = cv2.GaussianBlur(frame, (wSmooth, wSmooth), 0)", "SaveImageToDisk(im, saveDir, fileName, saveext=('.png',)): for ext in saveext: plt.imsave(os.path.join(saveDir, fileName", "for fid in range(numFrames): if isColor: writer.write(imVideo[:, :, :, fid].astype('uint8'))", "= cv2.VideoWriter_fourcc(*list(codec)) height, width = imVideo.shape[:2] writer = cv2.VideoWriter(filename, fourcc,", "'\\tFPS = ', fps metadata['FPS'] = fps fmt = vidseq.get(cv2.CAP_PROP_FORMAT)", "MODE # smooth if wanted if sigmaSmooth: wSmooth = 4", "np.abs(scipy.signal.hilbert(imInputRF, axis=0)) def normalizeAngles(angleList, angle_range): return np.array( [angles.normalize(i, angle_range[0], angle_range[1])", "Convert a Matplotlib figure to a 4D numpy array with", "mode. # Roll the ALPHA channel to have it in", "cv2.destroyWindow(strWindowName[vid]) def normalizeArray(a, bounds=None): if bounds is None: return (0.0", "xlabel=None, ylabel=None, title=None): plt.imshow(im.max(axis)) if title: plt.title(title) if xlabel: plt.xlabel(xlabel)", "maxFrames else: if k == 27: # escape break elif", "> maxFrames: maxFrames = imVideo[vid].shape[2] # display video blnLoop =", "plt.title(title) if xlabel: plt.xlabel(xlabel) if ylabel: plt.ylabel(ylabel) def convertFromRFtoBMode(imInputRF): return", "'\\FORMAT = ', fmt metadata['FORMAT'] = fmt vmode = vidseq.get(cv2.CAP_PROP_MODE)", "read video frames imInput = [] fid = 0 prevPercent", "1 curPercent = np.floor(100.0 * fid / numFrames) if curPercent", "- a.min()) / (a.max() - a.min()) def AddTextOnImage(imInput, strText, loc=(2,", "imDisp) def cvShowColorImage(imDisp, strName, strAnnotation='', textColor=(0, 0, 255), resizeAmount=None): if", "', frameWidth metadata['FRAME_WIDTH'] = frameWidth fps = vidseq.get(cv2.CAP_PROP_FPS) if fps", "- imB.mean()) / imB.std() return np.mean(imA * imB) def vis_checkerboard(im1,", "1 print metadata # read video frames imInput = []", "time.time() print 'Writing video {} took {} seconds'.format(filename, tEnd -", "start timer tStart = time.time() for fid in range(imVideo.shape[2]): plt.imsave(strFilePrefix", "give pixmap in ARGB mode. 
# Roll the ALPHA channel", "imVideo[vid][:, :, curVideoFid] # resize image if requested if resizeAmount:", "(0.0 + a - bounds[0]) / (bounds[1] - bounds[0]) b[b", "mplotShowImage(imInput): plt.imshow(imInput, cmap=plt.cm.gray) plt.grid(False) plt.xticks(()) plt.yticks(()) def normalizeArray(a): return np.single(0.0", "os, time import numpy as np import scipy.signal import scipy.misc", "== ord(' '): # space blnLoop = True elif k", "1) % maxFrames else: if k == 27: # escape", "not None: imDisp = cv2.resize(imDisp.copy(), None, fx=resizeAmount, fy=resizeAmount) if len(strAnnotation)", "[strWindowName] # find max number of frames maxFrames = 0", "print metadata metadata = {} numFrames = vidseq.get(cv2.CAP_PROP_FRAME_COUNT) print '\\tFRAME_COUNT", "vidseq.get(cv2.CAP_PROP_FRAME_COUNT) print '\\tFRAME_COUNT = ', numFrames metadata['FRAME_COUNT'] = numFrames frameHeight", "sitk.GetImageFromArray(im2)) return sitk.GetArrayFromImage(im_chk) def fig2data(fig): \"\"\" @brief Convert a Matplotlib", "= vidseq.get(cv2.CAP_PROP_FPS) if fps > 0: print '\\tFPS = ',", "- tStart) # release writer.release() def writeVideoAsTiffStack(imVideo, strFilePrefix): # start", "+ 1) % maxFrames else: if k == 27: #", "= True fid = 0 while True: for vid in", "print 'Writing video {} took {} seconds'.format(filename, tEnd - tStart)", "ncorr(imA, imB): imA = (imA - imA.mean()) / imA.std() imB", "the renderer fig.canvas.draw() # Get the RGBA buffer from the", "saveext: plt.imsave(os.path.join(saveDir, fileName + ext), im) def generateGatedVideoUsingSplineInterp(imInput, numOutFrames, minFrame,", "'.eps'), **kwargs): for ext in saveext: plt.savefig(os.path.join(saveDir, fileName + ext),", "it in RGBA mode buf = np.roll(buf, 3, axis=2) return", "maxFrames for vid in range(len(imVideo)): cv2.destroyWindow(strWindowName[vid]) def normalizeArray(a, bounds=None): if", "vid in range(len(imVideo)): if imVideo[vid].shape[-1] > maxFrames: maxFrames = imVideo[vid].shape[2]", "= imVideo.shape[:2] writer = cv2.VideoWriter(filename, fourcc, fps=fps, frameSize=(width, height), isColor=isColor)", "= imVideo[vid][:, :, curVideoFid] # resize image if requested if", "= 0 print '\\n' while True: valid_object, frame = vidseq.read()", "video {} took {} seconds'.format(strFilePrefix, tEnd - tStart) def mplotShowMIP(im,", "frameHeight = vidseq.get(cv2.CAP_PROP_FRAME_HEIGHT) if frameHeight > 0: print '\\tFRAME HEIGHT", "= cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) if resizeAmount: frame = scipy.misc.imresize(frame, resizeAmount) if", "video {} took {} seconds'.format(filename, tEnd - tStart) # release", "resizeAmount: imCur = scipy.misc.imresize(imCur, resizeAmount) # show image cvShowImage(imCur, strWindowName[vid],", "if not valid_object: break frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) if resizeAmount:", "print '\\tFPS = ', fps metadata['FPS'] = fps fmt =", "cvShowColorImage(imDisp, strName, strAnnotation='', textColor=(0, 0, 255), resizeAmount=None): if resizeAmount is", "return np.single(0.0 + a - a.min()) / (a.max() - a.min())", "= np.floor(100.0 * fid / numFrames) if curPercent > prevPercent:", "imB.mean()) / imB.std() return np.mean(imA * imB) def vis_checkerboard(im1, im2):", "= cv2.GaussianBlur(frame, (wSmooth, wSmooth), 0) imInput.append(frame) # update progress fid", "return np.asarray(imInputPIL) def AddTextOnVideo(imVideo, strText, loc=(2, 2)): imVideoOut = np.zeros_like(imVideo)", "as plt import PIL from PIL import ImageDraw import angles", "strAnnotation, (10, 20), cv2.FONT_HERSHEY_PLAIN, 2.0, textColor, thickness=2) 
cv2.imshow(strName, imDisp) def", "= 0 for vid in range(len(imVideo)): if imVideo[vid].shape[-1] > maxFrames:", "+ 1], (1, 1, tZoom), order=splineOrder) def ncorr(imA, imB): imA", "strText, loc=(2, 2)): imVideoOut = np.zeros_like(imVideo) for i in range(imVideo.shape[2]):", "smooth if wanted if sigmaSmooth: wSmooth = 4 * sigmaSmooth", "return np.mean(imA * imB) def vis_checkerboard(im1, im2): im_chk = sitk.CheckerBoard(sitk.GetImageFromArray(im1),", "h, 4) # canvas.tostring_argb give pixmap in ARGB mode. #", "frameHeight metadata['FRAME_HEIGHT'] = frameHeight frameWidth = vidseq.get(cv2.CAP_PROP_FRAME_WIDTH) if frameWidth >", "% imVideo[vid].shape[2] imCur = imVideo[vid][:, :, curVideoFid] # resize image", "plt.imsave(strFilePrefix + '.%.3d.tif' % (fid + 1), imVideo[:, :, fid])", "minFrame, maxFrame, splineOrder): tZoom = np.float(numOutFrames) / (maxFrame - minFrame", "4D numpy array with RGBA channels and return it @param", "(w, h, 4) # canvas.tostring_argb give pixmap in ARGB mode.", "blnLoop = True elif k == 81: # left arrow", "27: break elif k == ord(' '): blnLoop = False", "== ord(' '): blnLoop = False else: fid = (fid", "= time.time() for fid in range(imVideo.shape[2]): plt.imsave(strFilePrefix + '.%.3d.tif' %", "sigmaSmooth + 1 print metadata # read video frames imInput", "= AddTextOnImage(imVideo[:, :, i], strText, loc) return imVideoOut def cvShowVideo(imVideo,", "return np.abs(scipy.signal.hilbert(imInputRF, axis=0)) def normalizeAngles(angleList, angle_range): return np.array( [angles.normalize(i, angle_range[0],", "plt.yticks(()) def normalizeArray(a): return np.single(0.0 + a - a.min()) /", "height), isColor=isColor) print writer.isOpened() numFrames = imVideo.shape[-1] for fid in", "fill=color) return np.asarray(imInputPIL) def AddTextOnVideo(imVideo, strText, loc=(2, 2)): imVideoOut =", "resizeAmount) if sigmaSmooth: frame = cv2.GaussianBlur(frame, (wSmooth, wSmooth), 0) imInput.append(frame)", "elif k == ord(' '): # space blnLoop = True", "strWindowName, waitTime=30, resizeAmount=None): if not isinstance(imVideo, list): imVideo = [imVideo]", "imInput.append(frame) # update progress fid += 1 curPercent = np.floor(100.0", "look for \"esc\" key k = cv2.waitKey(waitTime) & 0xff if", "0: cv2.putText(imDisp, strAnnotation, (10, 20), cv2.FONT_HERSHEY_PLAIN, 2.0, textColor, thickness=2) cv2.imshow(strName,", "imInput = np.dstack(imInput) vidseq.release() return (imInput, metadata) def writeVideoToFile(imVideo, filename,", "imInputPIL = PIL.Image.fromarray(imInput) d = ImageDraw.Draw(imInputPIL) d.text(loc, strText, fill=color) return", "as np import scipy.signal import scipy.misc import scipy.ndimage.filters import matplotlib.pyplot", "vidseq.get(cv2.CAP_PROP_FRAME_WIDTH) if frameWidth > 0: print '\\tFRAME WIDTH = ',", "print writer.isOpened() numFrames = imVideo.shape[-1] for fid in range(numFrames): if", "len(strAnnotation) > 0: cv2.putText(imDisp, strAnnotation, (10, 20), cv2.FONT_HERSHEY_PLAIN, 2.0, textColor,", "cv2.VideoWriter(filename, fourcc, fps=fps, frameSize=(width, height), isColor=isColor) print writer.isOpened() numFrames =", "def normalizeArray(a, bounds=None): if bounds is None: return (0.0 +", "True: valid_object, frame = vidseq.read() if not valid_object: break frame", "RGBA channels and return it @param fig a matplotlib figure", "# start timer tStart = time.time() # write video #", "prevPercent = 0 print '\\n' while True: valid_object, frame =", "def fig2data(fig): \"\"\" @brief Convert a Matplotlib figure to a", "# canvas.tostring_argb give pixmap in ARGB mode. 
# Roll the", "matplotlib figure @return a numpy 3D array of RGBA values", "sigmaSmooth: frame = cv2.GaussianBlur(frame, (wSmooth, wSmooth), 0) imInput.append(frame) # update", "= vidseq.get(cv2.CAP_PROP_MODE) if vmode > 0: print '\\MODE = ',", "metadata = {} numFrames = vidseq.get(cv2.CAP_PROP_FRAME_COUNT) print '\\tFRAME_COUNT = ',", "tEnd - tStart) # release writer.release() def writeVideoAsTiffStack(imVideo, strFilePrefix): #", "angle_range[1]) for i in angleList]) def SaveFigToDisk(saveDir, fileName, saveext=('.png', '.eps'),", "= np.zeros_like(imVideo) for i in range(imVideo.shape[2]): imVideoOut[:, :, i] =", "waitTime=30, resizeAmount=None): if not isinstance(imVideo, list): imVideo = [imVideo] strWindowName", "resizeAmount) # show image cvShowImage(imCur, strWindowName[vid], '%d' % (curVideoFid +", "tStart = time.time() for fid in range(imVideo.shape[2]): plt.imsave(strFilePrefix + '.%.3d.tif'", "= cv2.resize(imDisp.copy(), None, fx=resizeAmount, fy=resizeAmount) if len(strAnnotation) > 0: cv2.putText(imDisp,", "= (0.0 + a - bounds[0]) / (bounds[1] - bounds[0])", "== 81: # left arrow fid = (fid - 1)", "scipy.misc import scipy.ndimage.filters import matplotlib.pyplot as plt import PIL from", "+ 1) return scipy.ndimage.interpolation.zoom( imInput[:, :, minFrame:maxFrame + 1], (1,", "cv2.imshow(strName, imDisp) def mplotShowImage(imInput): plt.imshow(imInput, cmap=plt.cm.gray) plt.grid(False) plt.xticks(()) plt.yticks(()) def", "a.min()) / (a.max() - a.min()) else: b = (0.0 +", "image cvShowImage(imCur, strWindowName[vid], '%d' % (curVideoFid + 1)) # look", "fid in range(numFrames): if isColor: writer.write(imVideo[:, :, :, fid].astype('uint8')) else:", "normalizeArray(a): return np.single(0.0 + a - a.min()) / (a.max() -", "blnLoop = True fid = 0 while True: for vid", "writer.release() def writeVideoAsTiffStack(imVideo, strFilePrefix): # start timer tStart = time.time()", "0 prevPercent = 0 print '\\n' while True: valid_object, frame", "if sigmaSmooth: wSmooth = 4 * sigmaSmooth + 1 print", "bounds[0] b[b > bounds[1]] = bounds[1] return b def loadVideoFromFile(dataFilePath,", "= frameWidth fps = vidseq.get(cv2.CAP_PROP_FPS) if fps > 0: print", "**kwargs): for ext in saveext: plt.savefig(os.path.join(saveDir, fileName + ext), **kwargs)", "if curPercent > prevPercent: prevPercent = curPercent print '%.2d%%' %", "if wanted if sigmaSmooth: wSmooth = 4 * sigmaSmooth +", "= vidseq.get(cv2.CAP_PROP_FRAME_WIDTH) if frameWidth > 0: print '\\tFRAME WIDTH =", "> 0: print '\\MODE = ', vmode metadata['MODE'] = MODE", "fy=resizeAmount) if len(strAnnotation) > 0: cv2.putText(imDisp, strAnnotation, (10, 20), cv2.FONT_HERSHEY_PLAIN,", "- minFrame + 1) return scipy.ndimage.interpolation.zoom( imInput[:, :, minFrame:maxFrame +", "strFilePrefix): # start timer tStart = time.time() for fid in", "h = fig.canvas.get_width_height() buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8) buf.shape = (w,", "(bounds[1] - bounds[0]) b[b < 0] = bounds[0] b[b >", "# smooth if wanted if sigmaSmooth: wSmooth = 4 *", "find max number of frames maxFrames = 0 for vid", "vidseq.get(cv2.CAP_PROP_FPS) if fps > 0: print '\\tFPS = ', fps", "= numFrames frameHeight = vidseq.get(cv2.CAP_PROP_FRAME_HEIGHT) if frameHeight > 0: print", "= vidseq.get(cv2.CAP_PROP_FORMAT) if fmt > 0: print '\\FORMAT = ',", "imDisp = cv2.resize(imDisp.copy(), None, fx=resizeAmount, fy=resizeAmount) if len(strAnnotation) > 0:", "for ext in saveext: plt.imsave(os.path.join(saveDir, fileName + ext), im) def", "print '\\tFRAME_COUNT = ', 
numFrames metadata['FRAME_COUNT'] = numFrames frameHeight =", "if isColor: writer.write(imVideo[:, :, :, fid].astype('uint8')) else: writer.write(imVideo[:, :, fid].astype('uint8'))", "', fps metadata['FPS'] = fps fmt = vidseq.get(cv2.CAP_PROP_FORMAT) if fmt", "> prevPercent: prevPercent = curPercent print '%.2d%%' % curPercent, print", "AddTextOnImage(imInput, strText, loc=(2, 2), color=255): imInputPIL = PIL.Image.fromarray(imInput) d =", "Get the RGBA buffer from the figure w, h =", "frameHeight frameWidth = vidseq.get(cv2.CAP_PROP_FRAME_WIDTH) if frameWidth > 0: print '\\tFRAME", "import os, time import numpy as np import scipy.signal import", "WIDTH = ', frameWidth metadata['FRAME_WIDTH'] = frameWidth fps = vidseq.get(cv2.CAP_PROP_FPS)", "opencv 2.4 fourcc = cv2.VideoWriter_fourcc(*list(codec)) height, width = imVideo.shape[:2] writer", "resizeAmount is not None: imDisp = cv2.resize(imDisp.copy(), None, fx=resizeAmount, fy=resizeAmount)", "< 0] = bounds[0] b[b > bounds[1]] = bounds[1] return", "is None: return (0.0 + a - a.min()) / (a.max()", "metadata['FRAME_COUNT'] = numFrames frameHeight = vidseq.get(cv2.CAP_PROP_FRAME_HEIGHT) if frameHeight > 0:", "None: imDisp = cv2.resize(imDisp.copy(), None, fx=resizeAmount, fy=resizeAmount) imDisp = cv2.cvtColor(imDisp,", "'.%.3d.tif' % (fid + 1), imVideo[:, :, fid]) # end", "= vidseq.get(cv2.CAP_PROP_FRAME_COUNT) print '\\tFRAME_COUNT = ', numFrames metadata['FRAME_COUNT'] = numFrames", "took {} seconds'.format(filename, tEnd - tStart) # release writer.release() def", "def normalizeArray(a): return np.single(0.0 + a - a.min()) / (a.max()", "scipy.ndimage.interpolation.zoom( imInput[:, :, minFrame:maxFrame + 1], (1, 1, tZoom), order=splineOrder)", "scipy.ndimage.filters import matplotlib.pyplot as plt import PIL from PIL import", "imInput[:, :, minFrame:maxFrame + 1], (1, 1, tZoom), order=splineOrder) def", "ALPHA channel to have it in RGBA mode buf =", "vidseq.get(cv2.CAP_PROP_FORMAT) if fmt > 0: print '\\FORMAT = ', fmt", "fid / numFrames) if curPercent > prevPercent: prevPercent = curPercent", "def writeVideoToFile(imVideo, filename, codec='DIVX', fps=30, isColor=False): # start timer tStart", "# write video # fourcc = cv2.FOURCC(*list(codec)) # opencv 2.4", "plt.imshow(im.max(axis)) if title: plt.title(title) if xlabel: plt.xlabel(xlabel) if ylabel: plt.ylabel(ylabel)", "- a.min()) else: b = (0.0 + a - bounds[0])", "+ '.%.3d.tif' % (fid + 1), imVideo[:, :, fid]) #", "> 0: print '\\FORMAT = ', fmt metadata['FORMAT'] = fmt", "+ ext), im) def generateGatedVideoUsingSplineInterp(imInput, numOutFrames, minFrame, maxFrame, splineOrder): tZoom", "(imInput, metadata) def writeVideoToFile(imVideo, filename, codec='DIVX', fps=30, isColor=False): # start", "timer tEnd = time.time() print 'Writing video {} took {}", "a numpy 3D array of RGBA values \"\"\" # draw", "def convertFromRFtoBMode(imInputRF): return np.abs(scipy.signal.hilbert(imInputRF, axis=0)) def normalizeAngles(angleList, angle_range): return np.array(", "writeVideoAsTiffStack(imVideo, strFilePrefix): # start timer tStart = time.time() for fid", "curPercent print '%.2d%%' % curPercent, print '\\n' imInput = np.dstack(imInput)", "in saveext: plt.imsave(os.path.join(saveDir, fileName + ext), im) def generateGatedVideoUsingSplineInterp(imInput, numOutFrames,", "= fps fmt = vidseq.get(cv2.CAP_PROP_FORMAT) if fmt > 0: print", "k == 83: # right arrow fid = (fid +", "1) % maxFrames for vid in range(len(imVideo)): cv2.destroyWindow(strWindowName[vid]) def normalizeArray(a,", "draw the renderer 
fig.canvas.draw() # Get the RGBA buffer from", "fid].astype('uint8')) else: writer.write(imVideo[:, :, fid].astype('uint8')) # end timer tEnd =", "fid = (fid + 1) % maxFrames else: if k", "end timer tEnd = time.time() print 'Writing video {} took", "# look for \"esc\" key k = cv2.waitKey(waitTime) & 0xff", "frame = vidseq.read() if not valid_object: break frame = cv2.cvtColor(frame,", "print '%.2d%%' % curPercent, print '\\n' imInput = np.dstack(imInput) vidseq.release()", "in range(len(imVideo)): curVideoFid = fid % imVideo[vid].shape[2] imCur = imVideo[vid][:,", "= curPercent print '%.2d%%' % curPercent, print '\\n' imInput =", "# fourcc = cv2.FOURCC(*list(codec)) # opencv 2.4 fourcc = cv2.VideoWriter_fourcc(*list(codec))", "= (w, h, 4) # canvas.tostring_argb give pixmap in ARGB", "{} took {} seconds'.format(filename, tEnd - tStart) # release writer.release()", "imVideoOut = np.zeros_like(imVideo) for i in range(imVideo.shape[2]): imVideoOut[:, :, i]", "the RGBA buffer from the figure w, h = fig.canvas.get_width_height()", "frame = scipy.misc.imresize(frame, resizeAmount) if sigmaSmooth: frame = cv2.GaussianBlur(frame, (wSmooth,", "frameWidth fps = vidseq.get(cv2.CAP_PROP_FPS) if fps > 0: print '\\tFPS", ":, fid].astype('uint8')) # end timer tEnd = time.time() print 'Writing", "np.mean(imA * imB) def vis_checkerboard(im1, im2): im_chk = sitk.CheckerBoard(sitk.GetImageFromArray(im1), sitk.GetImageFromArray(im2))", "scipy.signal import scipy.misc import scipy.ndimage.filters import matplotlib.pyplot as plt import", "i], strText, loc) return imVideoOut def cvShowVideo(imVideo, strWindowName, waitTime=30, resizeAmount=None):", "cv2.COLOR_BGR2GRAY) if resizeAmount: frame = scipy.misc.imresize(frame, resizeAmount) if sigmaSmooth: frame", "'Writing video {} took {} seconds'.format(strFilePrefix, tEnd - tStart) def", "bounds[1] return b def loadVideoFromFile(dataFilePath, sigmaSmooth=None, resizeAmount=None): vidseq = cv2.VideoCapture(dataFilePath)", "numpy as np import scipy.signal import scipy.misc import scipy.ndimage.filters import", "(10, 20), cv2.FONT_HERSHEY_PLAIN, 2.0, textColor, thickness=2) cv2.imshow(strName, imDisp) def cvShowColorImage(imDisp,", "if fps > 0: print '\\tFPS = ', fps metadata['FPS']", "height, width = imVideo.shape[:2] writer = cv2.VideoWriter(filename, fourcc, fps=fps, frameSize=(width,", "Roll the ALPHA channel to have it in RGBA mode", "+ 1)) # look for \"esc\" key k = cv2.waitKey(waitTime)", "1, tZoom), order=splineOrder) def ncorr(imA, imB): imA = (imA -", "/ (a.max() - a.min()) else: b = (0.0 + a", ":, fid]) # end timer tEnd = time.time() print 'Writing", "(fid + 1) % maxFrames else: if k == 27:", "i in range(imVideo.shape[2]): imVideoOut[:, :, i] = AddTextOnImage(imVideo[:, :, i],", "wSmooth), 0) imInput.append(frame) # update progress fid += 1 curPercent", "cv2.FOURCC(*list(codec)) # opencv 2.4 fourcc = cv2.VideoWriter_fourcc(*list(codec)) height, width =", "1), imVideo[:, :, fid]) # end timer tEnd = time.time()", "% maxFrames elif k == 83: # right arrow fid", "import angles import cv2 import SimpleITK as sitk def cvShowImage(imDisp,", "0 print '\\n' while True: valid_object, frame = vidseq.read() if", "return scipy.ndimage.interpolation.zoom( imInput[:, :, minFrame:maxFrame + 1], (1, 1, tZoom),", "right arrow fid = (fid + 1) % maxFrames for", "number of frames maxFrames = 0 for vid in range(len(imVideo)):", "def mplotShowImage(imInput): plt.imshow(imInput, cmap=plt.cm.gray) plt.grid(False) plt.xticks(()) plt.yticks(()) def normalizeArray(a): return", "RGBA 
buffer from the figure w, h = fig.canvas.get_width_height() buf", "plt import PIL from PIL import ImageDraw import angles import", "= cv2.cvtColor(imDisp, cv2.COLOR_GRAY2RGB) if len(strAnnotation) > 0: cv2.putText(imDisp, strAnnotation, (10,", "print '\\n' imInput = np.dstack(imInput) vidseq.release() return (imInput, metadata) def", "maxFrame, splineOrder): tZoom = np.float(numOutFrames) / (maxFrame - minFrame +", "3D array of RGBA values \"\"\" # draw the renderer", "+ a - a.min()) / (a.max() - a.min()) else: b", "for i in angleList]) def SaveFigToDisk(saveDir, fileName, saveext=('.png', '.eps'), **kwargs):", "cvShowImage(imCur, strWindowName[vid], '%d' % (curVideoFid + 1)) # look for", "image if requested if resizeAmount: imCur = scipy.misc.imresize(imCur, resizeAmount) #", "title=None): plt.imshow(im.max(axis)) if title: plt.title(title) if xlabel: plt.xlabel(xlabel) if ylabel:", "w, h = fig.canvas.get_width_height() buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8) buf.shape =", "None: return (0.0 + a - a.min()) / (a.max() -", "np.single(0.0 + a - a.min()) / (a.max() - a.min()) def", "saveext=('.png', '.eps'), **kwargs): for ext in saveext: plt.savefig(os.path.join(saveDir, fileName +", "b def loadVideoFromFile(dataFilePath, sigmaSmooth=None, resizeAmount=None): vidseq = cv2.VideoCapture(dataFilePath) print vidseq,", "resizeAmount=None): if not isinstance(imVideo, list): imVideo = [imVideo] strWindowName =", "in range(numFrames): if isColor: writer.write(imVideo[:, :, :, fid].astype('uint8')) else: writer.write(imVideo[:,", "writer.write(imVideo[:, :, :, fid].astype('uint8')) else: writer.write(imVideo[:, :, fid].astype('uint8')) # end", "(imA - imA.mean()) / imA.std() imB = (imB - imB.mean())", "imB) def vis_checkerboard(im1, im2): im_chk = sitk.CheckerBoard(sitk.GetImageFromArray(im1), sitk.GetImageFromArray(im2)) return sitk.GetArrayFromImage(im_chk)", "resizeAmount=None): vidseq = cv2.VideoCapture(dataFilePath) print vidseq, vidseq.isOpened() # print metadata", "- imA.mean()) / imA.std() imB = (imB - imB.mean()) /", "the figure w, h = fig.canvas.get_width_height() buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8)", "'\\tFRAME WIDTH = ', frameWidth metadata['FRAME_WIDTH'] = frameWidth fps =", "AddTextOnImage(imVideo[:, :, i], strText, loc) return imVideoOut def cvShowVideo(imVideo, strWindowName,", "order=splineOrder) def ncorr(imA, imB): imA = (imA - imA.mean()) /", "for vid in range(len(imVideo)): curVideoFid = fid % imVideo[vid].shape[2] imCur", "metadata['MODE'] = MODE # smooth if wanted if sigmaSmooth: wSmooth", "vidseq.get(cv2.CAP_PROP_FRAME_HEIGHT) if frameHeight > 0: print '\\tFRAME HEIGHT = ',", "took {} seconds'.format(strFilePrefix, tEnd - tStart) def mplotShowMIP(im, axis, xlabel=None,", "= (imA - imA.mean()) / imA.std() imB = (imB -", "for \"esc\" key k = cv2.waitKey(waitTime) & 0xff if blnLoop:", "i in angleList]) def SaveFigToDisk(saveDir, fileName, saveext=('.png', '.eps'), **kwargs): for", "'\\tFRAME_COUNT = ', numFrames metadata['FRAME_COUNT'] = numFrames frameHeight = vidseq.get(cv2.CAP_PROP_FRAME_HEIGHT)", "ext in saveext: plt.savefig(os.path.join(saveDir, fileName + ext), **kwargs) def SaveImageToDisk(im,", "frameHeight > 0: print '\\tFRAME HEIGHT = ', frameHeight metadata['FRAME_HEIGHT']", "fileName, saveext=('.png',)): for ext in saveext: plt.imsave(os.path.join(saveDir, fileName + ext),", "a - bounds[0]) / (bounds[1] - bounds[0]) b[b < 0]", "# space blnLoop = True elif k == 81: #", "(fid + 1), imVideo[:, :, fid]) # end timer 
tEnd", "tZoom = np.float(numOutFrames) / (maxFrame - minFrame + 1) return", "fid]) # end timer tEnd = time.time() print 'Writing video", "4 * sigmaSmooth + 1 print metadata # read video", "im) def generateGatedVideoUsingSplineInterp(imInput, numOutFrames, minFrame, maxFrame, splineOrder): tZoom = np.float(numOutFrames)", "/ (a.max() - a.min()) def AddTextOnImage(imInput, strText, loc=(2, 2), color=255):", "a.min()) / (a.max() - a.min()) def AddTextOnImage(imInput, strText, loc=(2, 2),", "metadata metadata = {} numFrames = vidseq.get(cv2.CAP_PROP_FRAME_COUNT) print '\\tFRAME_COUNT =", "vidseq, vidseq.isOpened() # print metadata metadata = {} numFrames =", "channels and return it @param fig a matplotlib figure @return", "def normalizeAngles(angleList, angle_range): return np.array( [angles.normalize(i, angle_range[0], angle_range[1]) for i", "= np.float(numOutFrames) / (maxFrame - minFrame + 1) return scipy.ndimage.interpolation.zoom(", "blnLoop: if k == 27: break elif k == ord('", "metadata['FORMAT'] = fmt vmode = vidseq.get(cv2.CAP_PROP_MODE) if vmode > 0:", "[angles.normalize(i, angle_range[0], angle_range[1]) for i in angleList]) def SaveFigToDisk(saveDir, fileName,", "== 27: # escape break elif k == ord(' '):", "return (0.0 + a - a.min()) / (a.max() - a.min())", "tStart) def mplotShowMIP(im, axis, xlabel=None, ylabel=None, title=None): plt.imshow(im.max(axis)) if title:", "im2): im_chk = sitk.CheckerBoard(sitk.GetImageFromArray(im1), sitk.GetImageFromArray(im2)) return sitk.GetArrayFromImage(im_chk) def fig2data(fig): \"\"\"", "bounds[1]] = bounds[1] return b def loadVideoFromFile(dataFilePath, sigmaSmooth=None, resizeAmount=None): vidseq", "* fid / numFrames) if curPercent > prevPercent: prevPercent =", "= [strWindowName] # find max number of frames maxFrames =", "\"\"\" # draw the renderer fig.canvas.draw() # Get the RGBA", "None, fx=resizeAmount, fy=resizeAmount) if len(strAnnotation) > 0: cv2.putText(imDisp, strAnnotation, (10,", "display video blnLoop = True fid = 0 while True:", "fourcc, fps=fps, frameSize=(width, height), isColor=isColor) print writer.isOpened() numFrames = imVideo.shape[-1]", "= cv2.waitKey(waitTime) & 0xff if blnLoop: if k == 27:", "imVideo[vid].shape[-1] > maxFrames: maxFrames = imVideo[vid].shape[2] # display video blnLoop", "None: imDisp = cv2.resize(imDisp.copy(), None, fx=resizeAmount, fy=resizeAmount) if len(strAnnotation) >", "ylabel=None, title=None): plt.imshow(im.max(axis)) if title: plt.title(title) if xlabel: plt.xlabel(xlabel) if", "numFrames frameHeight = vidseq.get(cv2.CAP_PROP_FRAME_HEIGHT) if frameHeight > 0: print '\\tFRAME", "'Writing video {} took {} seconds'.format(filename, tEnd - tStart) #", "imB = (imB - imB.mean()) / imB.std() return np.mean(imA *", "loc=(2, 2)): imVideoOut = np.zeros_like(imVideo) for i in range(imVideo.shape[2]): imVideoOut[:,", "d.text(loc, strText, fill=color) return np.asarray(imInputPIL) def AddTextOnVideo(imVideo, strText, loc=(2, 2)):", "if ylabel: plt.ylabel(ylabel) def convertFromRFtoBMode(imInputRF): return np.abs(scipy.signal.hilbert(imInputRF, axis=0)) def normalizeAngles(angleList,", "title: plt.title(title) if xlabel: plt.xlabel(xlabel) if ylabel: plt.ylabel(ylabel) def convertFromRFtoBMode(imInputRF):", "(1, 1, tZoom), order=splineOrder) def ncorr(imA, imB): imA = (imA", "else: if k == 27: # escape break elif k", "# escape break elif k == ord(' '): # space", "fps fmt = vidseq.get(cv2.CAP_PROP_FORMAT) if fmt > 0: print '\\FORMAT", "while True: valid_object, frame = vidseq.read() if not valid_object: 
break", "write video # fourcc = cv2.FOURCC(*list(codec)) # opencv 2.4 fourcc", "array with RGBA channels and return it @param fig a", "show image cvShowImage(imCur, strWindowName[vid], '%d' % (curVideoFid + 1)) #", "metadata['FPS'] = fps fmt = vidseq.get(cv2.CAP_PROP_FORMAT) if fmt > 0:", "sitk.GetArrayFromImage(im_chk) def fig2data(fig): \"\"\" @brief Convert a Matplotlib figure to", "frameWidth > 0: print '\\tFRAME WIDTH = ', frameWidth metadata['FRAME_WIDTH']", "tZoom), order=splineOrder) def ncorr(imA, imB): imA = (imA - imA.mean())", "buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8) buf.shape = (w, h, 4) #", "metadata # read video frames imInput = [] fid =", "if frameHeight > 0: print '\\tFRAME HEIGHT = ', frameHeight", "\"esc\" key k = cv2.waitKey(waitTime) & 0xff if blnLoop: if", "a - a.min()) / (a.max() - a.min()) else: b =", "= ', vmode metadata['MODE'] = MODE # smooth if wanted", "= ', frameHeight metadata['FRAME_HEIGHT'] = frameHeight frameWidth = vidseq.get(cv2.CAP_PROP_FRAME_WIDTH) if", "k == 27: # escape break elif k == ord('", "if title: plt.title(title) if xlabel: plt.xlabel(xlabel) if ylabel: plt.ylabel(ylabel) def", "+ 1 print metadata # read video frames imInput =", "writer.write(imVideo[:, :, fid].astype('uint8')) # end timer tEnd = time.time() print", "strWindowName = [strWindowName] # find max number of frames maxFrames", "ext), im) def generateGatedVideoUsingSplineInterp(imInput, numOutFrames, minFrame, maxFrame, splineOrder): tZoom =", "0: print '\\tFPS = ', fps metadata['FPS'] = fps fmt", "not valid_object: break frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) if resizeAmount: frame", "plt.savefig(os.path.join(saveDir, fileName + ext), **kwargs) def SaveImageToDisk(im, saveDir, fileName, saveext=('.png',)):", "space blnLoop = True elif k == 81: # left", "(10, 20), cv2.FONT_HERSHEY_PLAIN, 2.0, textColor, thickness=2) cv2.imshow(strName, imDisp) def mplotShowImage(imInput):", "release writer.release() def writeVideoAsTiffStack(imVideo, strFilePrefix): # start timer tStart =", "curPercent, print '\\n' imInput = np.dstack(imInput) vidseq.release() return (imInput, metadata)", "textColor, thickness=2) cv2.imshow(strName, imDisp) def mplotShowImage(imInput): plt.imshow(imInput, cmap=plt.cm.gray) plt.grid(False) plt.xticks(())", "tStart = time.time() # write video # fourcc = cv2.FOURCC(*list(codec))", "- a.min()) / (a.max() - a.min()) else: b = (0.0", "break frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) if resizeAmount: frame = scipy.misc.imresize(frame,", "vid in range(len(imVideo)): curVideoFid = fid % imVideo[vid].shape[2] imCur =", "= True elif k == 81: # left arrow fid", "True elif k == 81: # left arrow fid =", "fps=30, isColor=False): # start timer tStart = time.time() # write", "imVideo.shape[-1] for fid in range(numFrames): if isColor: writer.write(imVideo[:, :, :,", "cv2.resize(imDisp.copy(), None, fx=resizeAmount, fy=resizeAmount) if len(strAnnotation) > 0: cv2.putText(imDisp, strAnnotation,", "elif k == 83: # right arrow fid = (fid", "plt.grid(False) plt.xticks(()) plt.yticks(()) def normalizeArray(a): return np.single(0.0 + a -", "imVideo[:, :, fid]) # end timer tEnd = time.time() print", "+= 1 curPercent = np.floor(100.0 * fid / numFrames) if", "- bounds[0]) b[b < 0] = bounds[0] b[b > bounds[1]]", "fig a matplotlib figure @return a numpy 3D array of", "numFrames = imVideo.shape[-1] for fid in range(numFrames): if isColor: writer.write(imVideo[:,", "break elif k == ord(' '): # space blnLoop =", "0: print '\\tFRAME WIDTH = ', frameWidth 
metadata['FRAME_WIDTH'] = frameWidth", "timer tStart = time.time() # write video # fourcc =", "fourcc = cv2.FOURCC(*list(codec)) # opencv 2.4 fourcc = cv2.VideoWriter_fourcc(*list(codec)) height,", "'\\tFRAME HEIGHT = ', frameHeight metadata['FRAME_HEIGHT'] = frameHeight frameWidth =", "if k == 27: break elif k == ord(' '):", "2.0, textColor, thickness=2) cv2.imshow(strName, imDisp) def cvShowColorImage(imDisp, strName, strAnnotation='', textColor=(0,", "[imVideo] strWindowName = [strWindowName] # find max number of frames", "# print metadata metadata = {} numFrames = vidseq.get(cv2.CAP_PROP_FRAME_COUNT) print", "textColor, thickness=2) cv2.imshow(strName, imDisp) def cvShowColorImage(imDisp, strName, strAnnotation='', textColor=(0, 0,", "None, fx=resizeAmount, fy=resizeAmount) imDisp = cv2.cvtColor(imDisp, cv2.COLOR_GRAY2RGB) if len(strAnnotation) >", "filename, codec='DIVX', fps=30, isColor=False): # start timer tStart = time.time()", "print '\\MODE = ', vmode metadata['MODE'] = MODE # smooth", "tEnd - tStart) def mplotShowMIP(im, axis, xlabel=None, ylabel=None, title=None): plt.imshow(im.max(axis))", "progress fid += 1 curPercent = np.floor(100.0 * fid /", "\"\"\" @brief Convert a Matplotlib figure to a 4D numpy", "frame = cv2.GaussianBlur(frame, (wSmooth, wSmooth), 0) imInput.append(frame) # update progress", "np.zeros_like(imVideo) for i in range(imVideo.shape[2]): imVideoOut[:, :, i] = AddTextOnImage(imVideo[:,", "True: for vid in range(len(imVideo)): curVideoFid = fid % imVideo[vid].shape[2]", "@param fig a matplotlib figure @return a numpy 3D array", "(wSmooth, wSmooth), 0) imInput.append(frame) # update progress fid += 1", "loadVideoFromFile(dataFilePath, sigmaSmooth=None, resizeAmount=None): vidseq = cv2.VideoCapture(dataFilePath) print vidseq, vidseq.isOpened() #", "83: # right arrow fid = (fid + 1) %", "HEIGHT = ', frameHeight metadata['FRAME_HEIGHT'] = frameHeight frameWidth = vidseq.get(cv2.CAP_PROP_FRAME_WIDTH)", "frameWidth = vidseq.get(cv2.CAP_PROP_FRAME_WIDTH) if frameWidth > 0: print '\\tFRAME WIDTH", "return (imInput, metadata) def writeVideoToFile(imVideo, filename, codec='DIVX', fps=30, isColor=False): #", "PIL.Image.fromarray(imInput) d = ImageDraw.Draw(imInputPIL) d.text(loc, strText, fill=color) return np.asarray(imInputPIL) def", "d = ImageDraw.Draw(imInputPIL) d.text(loc, strText, fill=color) return np.asarray(imInputPIL) def AddTextOnVideo(imVideo,", "imA.mean()) / imA.std() imB = (imB - imB.mean()) / imB.std()", "if fmt > 0: print '\\FORMAT = ', fmt metadata['FORMAT']", "= bounds[1] return b def loadVideoFromFile(dataFilePath, sigmaSmooth=None, resizeAmount=None): vidseq =", "maxFrames = imVideo[vid].shape[2] # display video blnLoop = True fid", "numFrames = vidseq.get(cv2.CAP_PROP_FRAME_COUNT) print '\\tFRAME_COUNT = ', numFrames metadata['FRAME_COUNT'] =", "cvShowImage(imDisp, strName, strAnnotation='', textColor=(0, 0, 255), resizeAmount=None): if resizeAmount is", "if bounds is None: return (0.0 + a - a.min())", "else: writer.write(imVideo[:, :, fid].astype('uint8')) # end timer tEnd = time.time()", "a 4D numpy array with RGBA channels and return it", "# find max number of frames maxFrames = 0 for", "# read video frames imInput = [] fid = 0", "% maxFrames else: if k == 27: # escape break", "fid = (fid - 1) % maxFrames elif k ==", "normalizeAngles(angleList, angle_range): return np.array( [angles.normalize(i, angle_range[0], angle_range[1]) for i in", "'%d' % (curVideoFid + 1)) # look for \"esc\" key", "print metadata # read video frames imInput = [] fid", 
"k == ord(' '): blnLoop = False else: fid =", "# show image cvShowImage(imCur, strWindowName[vid], '%d' % (curVideoFid + 1))", "in saveext: plt.savefig(os.path.join(saveDir, fileName + ext), **kwargs) def SaveImageToDisk(im, saveDir,", "pixmap in ARGB mode. # Roll the ALPHA channel to", "cv2.FONT_HERSHEY_PLAIN, 2.0, textColor, thickness=2) cv2.imshow(strName, imDisp) def cvShowColorImage(imDisp, strName, strAnnotation='',", "# resize image if requested if resizeAmount: imCur = scipy.misc.imresize(imCur,", "frameWidth metadata['FRAME_WIDTH'] = frameWidth fps = vidseq.get(cv2.CAP_PROP_FPS) if fps >", ":, curVideoFid] # resize image if requested if resizeAmount: imCur", "# right arrow fid = (fid + 1) % maxFrames", "prevPercent = curPercent print '%.2d%%' % curPercent, print '\\n' imInput", "renderer fig.canvas.draw() # Get the RGBA buffer from the figure", "fx=resizeAmount, fy=resizeAmount) if len(strAnnotation) > 0: cv2.putText(imDisp, strAnnotation, (10, 20),", "metadata) def writeVideoToFile(imVideo, filename, codec='DIVX', fps=30, isColor=False): # start timer", "range(imVideo.shape[2]): plt.imsave(strFilePrefix + '.%.3d.tif' % (fid + 1), imVideo[:, :,", "/ imA.std() imB = (imB - imB.mean()) / imB.std() return", "def SaveImageToDisk(im, saveDir, fileName, saveext=('.png',)): for ext in saveext: plt.imsave(os.path.join(saveDir,", "= imVideo.shape[-1] for fid in range(numFrames): if isColor: writer.write(imVideo[:, :,", "else: b = (0.0 + a - bounds[0]) / (bounds[1]", "def AddTextOnImage(imInput, strText, loc=(2, 2), color=255): imInputPIL = PIL.Image.fromarray(imInput) d", "/ imB.std() return np.mean(imA * imB) def vis_checkerboard(im1, im2): im_chk", "imDisp = cv2.cvtColor(imDisp, cv2.COLOR_GRAY2RGB) if len(strAnnotation) > 0: cv2.putText(imDisp, strAnnotation,", "color=255): imInputPIL = PIL.Image.fromarray(imInput) d = ImageDraw.Draw(imInputPIL) d.text(loc, strText, fill=color)", "= ImageDraw.Draw(imInputPIL) d.text(loc, strText, fill=color) return np.asarray(imInputPIL) def AddTextOnVideo(imVideo, strText,", "cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) if resizeAmount: frame = scipy.misc.imresize(frame, resizeAmount) if sigmaSmooth:", "not None: imDisp = cv2.resize(imDisp.copy(), None, fx=resizeAmount, fy=resizeAmount) imDisp =", "vidseq.release() return (imInput, metadata) def writeVideoToFile(imVideo, filename, codec='DIVX', fps=30, isColor=False):", "sitk def cvShowImage(imDisp, strName, strAnnotation='', textColor=(0, 0, 255), resizeAmount=None): if", "2.0, textColor, thickness=2) cv2.imshow(strName, imDisp) def mplotShowImage(imInput): plt.imshow(imInput, cmap=plt.cm.gray) plt.grid(False)", "maxFrames: maxFrames = imVideo[vid].shape[2] # display video blnLoop = True", "channel to have it in RGBA mode buf = np.roll(buf,", "from the figure w, h = fig.canvas.get_width_height() buf = np.fromstring(fig.canvas.tostring_argb(),", "fid in range(imVideo.shape[2]): plt.imsave(strFilePrefix + '.%.3d.tif' % (fid + 1),", "in range(len(imVideo)): cv2.destroyWindow(strWindowName[vid]) def normalizeArray(a, bounds=None): if bounds is None:", ":, :, fid].astype('uint8')) else: writer.write(imVideo[:, :, fid].astype('uint8')) # end timer", "4) # canvas.tostring_argb give pixmap in ARGB mode. 
# Roll", "import numpy as np import scipy.signal import scipy.misc import scipy.ndimage.filters", "it @param fig a matplotlib figure @return a numpy 3D", "np.asarray(imInputPIL) def AddTextOnVideo(imVideo, strText, loc=(2, 2)): imVideoOut = np.zeros_like(imVideo) for", "2)): imVideoOut = np.zeros_like(imVideo) for i in range(imVideo.shape[2]): imVideoOut[:, :,", "b[b > bounds[1]] = bounds[1] return b def loadVideoFromFile(dataFilePath, sigmaSmooth=None,", "is not None: imDisp = cv2.resize(imDisp.copy(), None, fx=resizeAmount, fy=resizeAmount) imDisp", "= imVideo[vid].shape[2] # display video blnLoop = True fid =", "video frames imInput = [] fid = 0 prevPercent =", "figure w, h = fig.canvas.get_width_height() buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8) buf.shape", "* sigmaSmooth + 1 print metadata # read video frames", "(a.max() - a.min()) def AddTextOnImage(imInput, strText, loc=(2, 2), color=255): imInputPIL", "= MODE # smooth if wanted if sigmaSmooth: wSmooth =", "fileName + ext), **kwargs) def SaveImageToDisk(im, saveDir, fileName, saveext=('.png',)): for", "@return a numpy 3D array of RGBA values \"\"\" #", "- a.min()) def AddTextOnImage(imInput, strText, loc=(2, 2), color=255): imInputPIL =", "to a 4D numpy array with RGBA channels and return", "np.float(numOutFrames) / (maxFrame - minFrame + 1) return scipy.ndimage.interpolation.zoom( imInput[:,", "textColor=(0, 0, 255), resizeAmount=None): if resizeAmount is not None: imDisp", "if resizeAmount: frame = scipy.misc.imresize(frame, resizeAmount) if sigmaSmooth: frame =", "thickness=2) cv2.imshow(strName, imDisp) def mplotShowImage(imInput): plt.imshow(imInput, cmap=plt.cm.gray) plt.grid(False) plt.xticks(()) plt.yticks(())", "> 0: cv2.putText(imDisp, strAnnotation, (10, 20), cv2.FONT_HERSHEY_PLAIN, 2.0, textColor, thickness=2)", "if xlabel: plt.xlabel(xlabel) if ylabel: plt.ylabel(ylabel) def convertFromRFtoBMode(imInputRF): return np.abs(scipy.signal.hilbert(imInputRF,", "+ a - bounds[0]) / (bounds[1] - bounds[0]) b[b <", "= ', fmt metadata['FORMAT'] = fmt vmode = vidseq.get(cv2.CAP_PROP_MODE) if", "2.4 fourcc = cv2.VideoWriter_fourcc(*list(codec)) height, width = imVideo.shape[:2] writer =", "def writeVideoAsTiffStack(imVideo, strFilePrefix): # start timer tStart = time.time() for", "RGBA values \"\"\" # draw the renderer fig.canvas.draw() # Get", "# start timer tStart = time.time() for fid in range(imVideo.shape[2]):", "while True: for vid in range(len(imVideo)): curVideoFid = fid %", "* imB) def vis_checkerboard(im1, im2): im_chk = sitk.CheckerBoard(sitk.GetImageFromArray(im1), sitk.GetImageFromArray(im2)) return", "elif k == ord(' '): blnLoop = False else: fid", "resizeAmount=None): if resizeAmount is not None: imDisp = cv2.resize(imDisp.copy(), None,", "xlabel: plt.xlabel(xlabel) if ylabel: plt.ylabel(ylabel) def convertFromRFtoBMode(imInputRF): return np.abs(scipy.signal.hilbert(imInputRF, axis=0))", "writeVideoToFile(imVideo, filename, codec='DIVX', fps=30, isColor=False): # start timer tStart =", "else: fid = (fid + 1) % maxFrames else: if", "fps metadata['FPS'] = fps fmt = vidseq.get(cv2.CAP_PROP_FORMAT) if fmt >", "cv2.VideoWriter_fourcc(*list(codec)) height, width = imVideo.shape[:2] writer = cv2.VideoWriter(filename, fourcc, fps=fps,", "vidseq = cv2.VideoCapture(dataFilePath) print vidseq, vidseq.isOpened() # print metadata metadata", "AddTextOnVideo(imVideo, strText, loc=(2, 2)): imVideoOut = np.zeros_like(imVideo) for i in", "= time.time() print 'Writing video {} took {} seconds'.format(strFilePrefix, 
tEnd", "= vidseq.read() if not valid_object: break frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)", "> 0: print '\\tFRAME HEIGHT = ', frameHeight metadata['FRAME_HEIGHT'] =", "1)) # look for \"esc\" key k = cv2.waitKey(waitTime) &", "vmode metadata['MODE'] = MODE # smooth if wanted if sigmaSmooth:", "timer tStart = time.time() for fid in range(imVideo.shape[2]): plt.imsave(strFilePrefix +", "ARGB mode. # Roll the ALPHA channel to have it", "resizeAmount: frame = scipy.misc.imresize(frame, resizeAmount) if sigmaSmooth: frame = cv2.GaussianBlur(frame,", "imDisp) def mplotShowImage(imInput): plt.imshow(imInput, cmap=plt.cm.gray) plt.grid(False) plt.xticks(()) plt.yticks(()) def normalizeArray(a):", "fmt vmode = vidseq.get(cv2.CAP_PROP_MODE) if vmode > 0: print '\\MODE", "0] = bounds[0] b[b > bounds[1]] = bounds[1] return b", "range(numFrames): if isColor: writer.write(imVideo[:, :, :, fid].astype('uint8')) else: writer.write(imVideo[:, :,", "list): imVideo = [imVideo] strWindowName = [strWindowName] # find max", "0: print '\\MODE = ', vmode metadata['MODE'] = MODE #", "angle_range): return np.array( [angles.normalize(i, angle_range[0], angle_range[1]) for i in angleList])", "minFrame:maxFrame + 1], (1, 1, tZoom), order=splineOrder) def ncorr(imA, imB):", "= PIL.Image.fromarray(imInput) d = ImageDraw.Draw(imInputPIL) d.text(loc, strText, fill=color) return np.asarray(imInputPIL)", "27: # escape break elif k == ord(' '): #", "= time.time() print 'Writing video {} took {} seconds'.format(filename, tEnd", "strText, loc=(2, 2), color=255): imInputPIL = PIL.Image.fromarray(imInput) d = ImageDraw.Draw(imInputPIL)", "axis=0)) def normalizeAngles(angleList, angle_range): return np.array( [angles.normalize(i, angle_range[0], angle_range[1]) for", "and return it @param fig a matplotlib figure @return a", "= False else: fid = (fid + 1) % maxFrames", "if not isinstance(imVideo, list): imVideo = [imVideo] strWindowName = [strWindowName]", "def generateGatedVideoUsingSplineInterp(imInput, numOutFrames, minFrame, maxFrame, splineOrder): tZoom = np.float(numOutFrames) /", "cv2.waitKey(waitTime) & 0xff if blnLoop: if k == 27: break", "normalizeArray(a, bounds=None): if bounds is None: return (0.0 + a", "== 27: break elif k == ord(' '): blnLoop =", "vidseq.read() if not valid_object: break frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) if", "as sitk def cvShowImage(imDisp, strName, strAnnotation='', textColor=(0, 0, 255), resizeAmount=None):", "maxFrames = 0 for vid in range(len(imVideo)): if imVideo[vid].shape[-1] >", "tEnd = time.time() print 'Writing video {} took {} seconds'.format(strFilePrefix,", "def AddTextOnVideo(imVideo, strText, loc=(2, 2)): imVideoOut = np.zeros_like(imVideo) for i", "def mplotShowMIP(im, axis, xlabel=None, ylabel=None, title=None): plt.imshow(im.max(axis)) if title: plt.title(title)", "from PIL import ImageDraw import angles import cv2 import SimpleITK", "# Roll the ALPHA channel to have it in RGBA", "= fid % imVideo[vid].shape[2] imCur = imVideo[vid][:, :, curVideoFid] #", "in ARGB mode. 
# Roll the ALPHA channel to have", "buffer from the figure w, h = fig.canvas.get_width_height() buf =", "buf.shape = (w, h, 4) # canvas.tostring_argb give pixmap in", "/ (bounds[1] - bounds[0]) b[b < 0] = bounds[0] b[b", "isColor: writer.write(imVideo[:, :, :, fid].astype('uint8')) else: writer.write(imVideo[:, :, fid].astype('uint8')) #", "np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8) buf.shape = (w, h, 4) # canvas.tostring_argb give", "max number of frames maxFrames = 0 for vid in", "'\\n' while True: valid_object, frame = vidseq.read() if not valid_object:", ":, minFrame:maxFrame + 1], (1, 1, tZoom), order=splineOrder) def ncorr(imA,", "bounds is None: return (0.0 + a - a.min()) /", "resize image if requested if resizeAmount: imCur = scipy.misc.imresize(imCur, resizeAmount)", "/ (maxFrame - minFrame + 1) return scipy.ndimage.interpolation.zoom( imInput[:, :,", "0 for vid in range(len(imVideo)): if imVideo[vid].shape[-1] > maxFrames: maxFrames", "= fig.canvas.get_width_height() buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8) buf.shape = (w, h,", "range(len(imVideo)): cv2.destroyWindow(strWindowName[vid]) def normalizeArray(a, bounds=None): if bounds is None: return", "fid].astype('uint8')) # end timer tEnd = time.time() print 'Writing video", "array of RGBA values \"\"\" # draw the renderer fig.canvas.draw()", "# left arrow fid = (fid - 1) % maxFrames", "if sigmaSmooth: frame = cv2.GaussianBlur(frame, (wSmooth, wSmooth), 0) imInput.append(frame) #", "tStart) # release writer.release() def writeVideoAsTiffStack(imVideo, strFilePrefix): # start timer", "fourcc = cv2.VideoWriter_fourcc(*list(codec)) height, width = imVideo.shape[:2] writer = cv2.VideoWriter(filename,", "import SimpleITK as sitk def cvShowImage(imDisp, strName, strAnnotation='', textColor=(0, 0,", "# Get the RGBA buffer from the figure w, h", "(curVideoFid + 1)) # look for \"esc\" key k =", "= cv2.VideoWriter(filename, fourcc, fps=fps, frameSize=(width, height), isColor=isColor) print writer.isOpened() numFrames", "if resizeAmount is not None: imDisp = cv2.resize(imDisp.copy(), None, fx=resizeAmount,", "0, 255), resizeAmount=None): if resizeAmount is not None: imDisp =", "cv2.GaussianBlur(frame, (wSmooth, wSmooth), 0) imInput.append(frame) # update progress fid +=", "in range(imVideo.shape[2]): imVideoOut[:, :, i] = AddTextOnImage(imVideo[:, :, i], strText,", "escape break elif k == ord(' '): # space blnLoop", "import scipy.misc import scipy.ndimage.filters import matplotlib.pyplot as plt import PIL", "{} seconds'.format(filename, tEnd - tStart) # release writer.release() def writeVideoAsTiffStack(imVideo,", "- tStart) def mplotShowMIP(im, axis, xlabel=None, ylabel=None, title=None): plt.imshow(im.max(axis)) if", "= scipy.misc.imresize(frame, resizeAmount) if sigmaSmooth: frame = cv2.GaussianBlur(frame, (wSmooth, wSmooth),", "values \"\"\" # draw the renderer fig.canvas.draw() # Get the", "def cvShowColorImage(imDisp, strName, strAnnotation='', textColor=(0, 0, 255), resizeAmount=None): if resizeAmount", "if frameWidth > 0: print '\\tFRAME WIDTH = ', frameWidth", "return it @param fig a matplotlib figure @return a numpy", "if k == 27: # escape break elif k ==", "saveext: plt.savefig(os.path.join(saveDir, fileName + ext), **kwargs) def SaveImageToDisk(im, saveDir, fileName,", "fid = 0 prevPercent = 0 print '\\n' while True:", "+ 1), imVideo[:, :, fid]) # end timer tEnd =", "& 0xff if blnLoop: if k == 27: break elif", "scipy.misc.imresize(imCur, resizeAmount) # show image cvShowImage(imCur, 
strWindowName[vid], '%d' % (curVideoFid", "numOutFrames, minFrame, maxFrame, splineOrder): tZoom = np.float(numOutFrames) / (maxFrame -", "return imVideoOut def cvShowVideo(imVideo, strWindowName, waitTime=30, resizeAmount=None): if not isinstance(imVideo,", "return sitk.GetArrayFromImage(im_chk) def fig2data(fig): \"\"\" @brief Convert a Matplotlib figure", "np import scipy.signal import scipy.misc import scipy.ndimage.filters import matplotlib.pyplot as", "+ a - a.min()) / (a.max() - a.min()) def AddTextOnImage(imInput,", "mplotShowMIP(im, axis, xlabel=None, ylabel=None, title=None): plt.imshow(im.max(axis)) if title: plt.title(title) if", "time.time() print 'Writing video {} took {} seconds'.format(strFilePrefix, tEnd -", "print '\\n' while True: valid_object, frame = vidseq.read() if not", "bounds[0]) b[b < 0] = bounds[0] b[b > bounds[1]] =", "prevPercent: prevPercent = curPercent print '%.2d%%' % curPercent, print '\\n'", "thickness=2) cv2.imshow(strName, imDisp) def cvShowColorImage(imDisp, strName, strAnnotation='', textColor=(0, 0, 255),", "strText, fill=color) return np.asarray(imInputPIL) def AddTextOnVideo(imVideo, strText, loc=(2, 2)): imVideoOut", "imVideo.shape[:2] writer = cv2.VideoWriter(filename, fourcc, fps=fps, frameSize=(width, height), isColor=isColor) print", "SaveFigToDisk(saveDir, fileName, saveext=('.png', '.eps'), **kwargs): for ext in saveext: plt.savefig(os.path.join(saveDir,", "canvas.tostring_argb give pixmap in ARGB mode. # Roll the ALPHA", "def cvShowImage(imDisp, strName, strAnnotation='', textColor=(0, 0, 255), resizeAmount=None): if resizeAmount", "fid % imVideo[vid].shape[2] imCur = imVideo[vid][:, :, curVideoFid] # resize", "plt.imsave(os.path.join(saveDir, fileName + ext), im) def generateGatedVideoUsingSplineInterp(imInput, numOutFrames, minFrame, maxFrame,", "'): blnLoop = False else: fid = (fid + 1)", "not isinstance(imVideo, list): imVideo = [imVideo] strWindowName = [strWindowName] #", "+ ext), **kwargs) def SaveImageToDisk(im, saveDir, fileName, saveext=('.png',)): for ext", "strName, strAnnotation='', textColor=(0, 0, 255), resizeAmount=None): if resizeAmount is not", "cvShowVideo(imVideo, strWindowName, waitTime=30, resizeAmount=None): if not isinstance(imVideo, list): imVideo =", "= bounds[0] b[b > bounds[1]] = bounds[1] return b def", "angle_range[0], angle_range[1]) for i in angleList]) def SaveFigToDisk(saveDir, fileName, saveext=('.png',", "for i in range(imVideo.shape[2]): imVideoOut[:, :, i] = AddTextOnImage(imVideo[:, :,", ":, i] = AddTextOnImage(imVideo[:, :, i], strText, loc) return imVideoOut", "imVideoOut[:, :, i] = AddTextOnImage(imVideo[:, :, i], strText, loc) return", "imVideoOut def cvShowVideo(imVideo, strWindowName, waitTime=30, resizeAmount=None): if not isinstance(imVideo, list):", "0 while True: for vid in range(len(imVideo)): curVideoFid = fid", "bounds[0]) / (bounds[1] - bounds[0]) b[b < 0] = bounds[0]", "def loadVideoFromFile(dataFilePath, sigmaSmooth=None, resizeAmount=None): vidseq = cv2.VideoCapture(dataFilePath) print vidseq, vidseq.isOpened()", "width = imVideo.shape[:2] writer = cv2.VideoWriter(filename, fourcc, fps=fps, frameSize=(width, height),", "splineOrder): tZoom = np.float(numOutFrames) / (maxFrame - minFrame + 1)", "import matplotlib.pyplot as plt import PIL from PIL import ImageDraw", "= (imB - imB.mean()) / imB.std() return np.mean(imA * imB)", "Matplotlib figure to a 4D numpy array with RGBA channels", "fig.canvas.get_width_height() buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8) 
buf.shape = (w, h, 4)", "{} took {} seconds'.format(strFilePrefix, tEnd - tStart) def mplotShowMIP(im, axis,", "= cv2.VideoCapture(dataFilePath) print vidseq, vidseq.isOpened() # print metadata metadata =", "0xff if blnLoop: if k == 27: break elif k", "tEnd = time.time() print 'Writing video {} took {} seconds'.format(filename,", "b[b < 0] = bounds[0] b[b > bounds[1]] = bounds[1]", "return np.array( [angles.normalize(i, angle_range[0], angle_range[1]) for i in angleList]) def", "im_chk = sitk.CheckerBoard(sitk.GetImageFromArray(im1), sitk.GetImageFromArray(im2)) return sitk.GetArrayFromImage(im_chk) def fig2data(fig): \"\"\" @brief", "print 'Writing video {} took {} seconds'.format(strFilePrefix, tEnd - tStart)", "a.min()) else: b = (0.0 + a - bounds[0]) /", "saveext=('.png',)): for ext in saveext: plt.imsave(os.path.join(saveDir, fileName + ext), im)", "PIL import ImageDraw import angles import cv2 import SimpleITK as", "fps=fps, frameSize=(width, height), isColor=isColor) print writer.isOpened() numFrames = imVideo.shape[-1] for", "for ext in saveext: plt.savefig(os.path.join(saveDir, fileName + ext), **kwargs) def", "np.dstack(imInput) vidseq.release() return (imInput, metadata) def writeVideoToFile(imVideo, filename, codec='DIVX', fps=30,", "% curPercent, print '\\n' imInput = np.dstack(imInput) vidseq.release() return (imInput,", "np.floor(100.0 * fid / numFrames) if curPercent > prevPercent: prevPercent", "of frames maxFrames = 0 for vid in range(len(imVideo)): if", "fps > 0: print '\\tFPS = ', fps metadata['FPS'] =", "time.time() # write video # fourcc = cv2.FOURCC(*list(codec)) # opencv", "= 0 prevPercent = 0 print '\\n' while True: valid_object,", "{} numFrames = vidseq.get(cv2.CAP_PROP_FRAME_COUNT) print '\\tFRAME_COUNT = ', numFrames metadata['FRAME_COUNT']", "= frameHeight frameWidth = vidseq.get(cv2.CAP_PROP_FRAME_WIDTH) if frameWidth > 0: print", "= sitk.CheckerBoard(sitk.GetImageFromArray(im1), sitk.GetImageFromArray(im2)) return sitk.GetArrayFromImage(im_chk) def fig2data(fig): \"\"\" @brief Convert", "20), cv2.FONT_HERSHEY_PLAIN, 2.0, textColor, thickness=2) cv2.imshow(strName, imDisp) def cvShowColorImage(imDisp, strName,", "fmt > 0: print '\\FORMAT = ', fmt metadata['FORMAT'] =", "in range(imVideo.shape[2]): plt.imsave(strFilePrefix + '.%.3d.tif' % (fid + 1), imVideo[:,", "time import numpy as np import scipy.signal import scipy.misc import", "print vidseq, vidseq.isOpened() # print metadata metadata = {} numFrames", ":, fid].astype('uint8')) else: writer.write(imVideo[:, :, fid].astype('uint8')) # end timer tEnd", "ImageDraw import angles import cv2 import SimpleITK as sitk def", "def SaveFigToDisk(saveDir, fileName, saveext=('.png', '.eps'), **kwargs): for ext in saveext:", "# display video blnLoop = True fid = 0 while", "codec='DIVX', fps=30, isColor=False): # start timer tStart = time.time() #", "range(imVideo.shape[2]): imVideoOut[:, :, i] = AddTextOnImage(imVideo[:, :, i], strText, loc)", "'\\n' imInput = np.dstack(imInput) vidseq.release() return (imInput, metadata) def writeVideoToFile(imVideo,", "isColor=False): # start timer tStart = time.time() # write video", "if requested if resizeAmount: imCur = scipy.misc.imresize(imCur, resizeAmount) # show", "imB.std() return np.mean(imA * imB) def vis_checkerboard(im1, im2): im_chk =", "', vmode metadata['MODE'] = MODE # smooth if wanted if", "update progress fid += 1 curPercent = np.floor(100.0 * fid", "ImageDraw.Draw(imInputPIL) d.text(loc, strText, fill=color) return np.asarray(imInputPIL) def AddTextOnVideo(imVideo, 
strText, loc=(2,", "fy=resizeAmount) imDisp = cv2.cvtColor(imDisp, cv2.COLOR_GRAY2RGB) if len(strAnnotation) > 0: cv2.putText(imDisp,", "import scipy.signal import scipy.misc import scipy.ndimage.filters import matplotlib.pyplot as plt", "imInput = [] fid = 0 prevPercent = 0 print", "SimpleITK as sitk def cvShowImage(imDisp, strName, strAnnotation='', textColor=(0, 0, 255),", "if imVideo[vid].shape[-1] > maxFrames: maxFrames = imVideo[vid].shape[2] # display video", "fig.canvas.draw() # Get the RGBA buffer from the figure w,", "in RGBA mode buf = np.roll(buf, 3, axis=2) return buf", "loc) return imVideoOut def cvShowVideo(imVideo, strWindowName, waitTime=30, resizeAmount=None): if not", "fileName, saveext=('.png', '.eps'), **kwargs): for ext in saveext: plt.savefig(os.path.join(saveDir, fileName", "video # fourcc = cv2.FOURCC(*list(codec)) # opencv 2.4 fourcc =", "import PIL from PIL import ImageDraw import angles import cv2", "/ numFrames) if curPercent > prevPercent: prevPercent = curPercent print", "k = cv2.waitKey(waitTime) & 0xff if blnLoop: if k ==", "= scipy.misc.imresize(imCur, resizeAmount) # show image cvShowImage(imCur, strWindowName[vid], '%d' %", "sigmaSmooth: wSmooth = 4 * sigmaSmooth + 1 print metadata", "ord(' '): blnLoop = False else: fid = (fid +", "[] fid = 0 prevPercent = 0 print '\\n' while", "range(len(imVideo)): curVideoFid = fid % imVideo[vid].shape[2] imCur = imVideo[vid][:, :,", "vidseq.isOpened() # print metadata metadata = {} numFrames = vidseq.get(cv2.CAP_PROP_FRAME_COUNT)", "print '\\tFRAME HEIGHT = ', frameHeight metadata['FRAME_HEIGHT'] = frameHeight frameWidth", "import ImageDraw import angles import cv2 import SimpleITK as sitk", "cv2.VideoCapture(dataFilePath) print vidseq, vidseq.isOpened() # print metadata metadata = {}", "', fmt metadata['FORMAT'] = fmt vmode = vidseq.get(cv2.CAP_PROP_MODE) if vmode", "= time.time() # write video # fourcc = cv2.FOURCC(*list(codec)) #", "frameSize=(width, height), isColor=isColor) print writer.isOpened() numFrames = imVideo.shape[-1] for fid", "0: print '\\FORMAT = ', fmt metadata['FORMAT'] = fmt vmode" ]
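As a quick, hedged usage sketch of the helpers above (loadVideoFromFile, normalizeArray, AddTextOnVideo, writeVideoAsTiffStack): the file name, smoothing sigma and resize factor below are made-up placeholders, and the calls assume these helpers are importable in the current session together with an OpenCV-readable video file.

# Sketch only: 'clip.avi' and the parameter values are assumptions, not from the source.
imGray, meta = loadVideoFromFile('clip.avi', sigmaSmooth=2, resizeAmount=0.5)

# Rescale to 8-bit, stamp a label on every frame, and dump the frames as TIFFs.
imLabeled = AddTextOnVideo((normalizeArray(imGray) * 255).astype('uint8'), 'gated')
writeVideoAsTiffStack(imLabeled, 'clip_frames')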
[ "= subject # smtp.sendmail() only converts CR and LF (produced", "= address.split(\"@\", 1) return \"%s@%s\" % (local, domain.lower()) def send(mailto,", "MIMEText(content.strip()) message[\"To\"] = mailto message[\"From\"] = macro.MACRO_EMAIL_ADDRESS message[\"Subject\"] = subject", "given address is not considered valid. \"\"\" address = address.strip()", "try: smtp.sendmail( from_addr=macro.MACRO_EMAIL_ADDRESS, to_addrs=[mailto], msg=msg_crlf, ) finally: smtp.quit() define.metric('increment', 'emails')", "normalized e-mail address to send this e-mail to. The system", "\"\"\" message = MIMEText(content.strip()) message[\"To\"] = mailto message[\"From\"] = macro.MACRO_EMAIL_ADDRESS", "as the sender. \"\"\" message = MIMEText(content.strip()) message[\"To\"] = mailto", "Converts an e-mail address to a consistent representation. Returns None", "need this: msg_crlf = re.sub(r\"\\r\\n|[\\r\\n]\", \"\\r\\n\", message.as_string()) smtp = SMTP(define.config_read_setting('host',", "LF (produced by MIMEText and our templates) to CRLF in", "None if the given address is not considered valid. \"\"\"", "In Python 2, we need this: msg_crlf = re.sub(r\"\\r\\n|[\\r\\n]\", \"\\r\\n\",", "absolute_import import re from email.mime.text import MIMEText from smtplib import", "not considered valid. \"\"\" address = address.strip() if not EMAIL_ADDRESS.match(address):", "by MIMEText and our templates) to CRLF in Python 3.", "\"localhost\", section='smtp')) try: smtp.sendmail( from_addr=macro.MACRO_EMAIL_ADDRESS, to_addrs=[mailto], msg=msg_crlf, ) finally: smtp.quit()", "3. In Python 2, we need this: msg_crlf = re.sub(r\"\\r\\n|[\\r\\n]\",", "smtplib import SMTP from weasyl import define, macro EMAIL_ADDRESS =", "import define, macro EMAIL_ADDRESS = re.compile(r\"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+\\Z\") def normalize_address(address): \"\"\" Converts", "from smtplib import SMTP from weasyl import define, macro EMAIL_ADDRESS", "re from email.mime.text import MIMEText from smtplib import SMTP from", "CRLF in Python 3. In Python 2, we need this:", "# smtp.sendmail() only converts CR and LF (produced by MIMEText", "sender. \"\"\" message = MIMEText(content.strip()) message[\"To\"] = mailto message[\"From\"] =", "= mailto message[\"From\"] = macro.MACRO_EMAIL_ADDRESS message[\"Subject\"] = subject # smtp.sendmail()", "import re from email.mime.text import MIMEText from smtplib import SMTP", "to. The system email will be designated as the sender.", "templates) to CRLF in Python 3. In Python 2, we", "re.sub(r\"\\r\\n|[\\r\\n]\", \"\\r\\n\", message.as_string()) smtp = SMTP(define.config_read_setting('host', \"localhost\", section='smtp')) try: smtp.sendmail(", "The system email will be designated as the sender. \"\"\"", "address is not considered valid. \"\"\" address = address.strip() if", "local, domain = address.split(\"@\", 1) return \"%s@%s\" % (local, domain.lower())", "address to send this e-mail to. The system email will", "msg_crlf = re.sub(r\"\\r\\n|[\\r\\n]\", \"\\r\\n\", message.as_string()) smtp = SMTP(define.config_read_setting('host', \"localhost\", section='smtp'))", "Python 3. In Python 2, we need this: msg_crlf =", "MIMEText and our templates) to CRLF in Python 3. In", "address.strip() if not EMAIL_ADDRESS.match(address): return None local, domain = address.split(\"@\",", "designated as the sender. \"\"\" message = MIMEText(content.strip()) message[\"To\"] =", "from __future__ import absolute_import import re from email.mime.text import MIMEText", "content): \"\"\"Send an e-mail. 
`mailto` must be a normalized e-mail", "Python 2, we need this: msg_crlf = re.sub(r\"\\r\\n|[\\r\\n]\", \"\\r\\n\", message.as_string())", "subject, content): \"\"\"Send an e-mail. `mailto` must be a normalized", "= address.strip() if not EMAIL_ADDRESS.match(address): return None local, domain =", "(local, domain.lower()) def send(mailto, subject, content): \"\"\"Send an e-mail. `mailto`", "weasyl import define, macro EMAIL_ADDRESS = re.compile(r\"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+\\Z\") def normalize_address(address): \"\"\"", "mailto message[\"From\"] = macro.MACRO_EMAIL_ADDRESS message[\"Subject\"] = subject # smtp.sendmail() only", "e-mail to. The system email will be designated as the", "e-mail. `mailto` must be a normalized e-mail address to send", "`mailto` must be a normalized e-mail address to send this", "None local, domain = address.split(\"@\", 1) return \"%s@%s\" % (local,", "import absolute_import import re from email.mime.text import MIMEText from smtplib", "smtp.sendmail() only converts CR and LF (produced by MIMEText and", "we need this: msg_crlf = re.sub(r\"\\r\\n|[\\r\\n]\", \"\\r\\n\", message.as_string()) smtp =", "message = MIMEText(content.strip()) message[\"To\"] = mailto message[\"From\"] = macro.MACRO_EMAIL_ADDRESS message[\"Subject\"]", "an e-mail address to a consistent representation. Returns None if", "email.mime.text import MIMEText from smtplib import SMTP from weasyl import", "return \"%s@%s\" % (local, domain.lower()) def send(mailto, subject, content): \"\"\"Send", "representation. Returns None if the given address is not considered", "send this e-mail to. The system email will be designated", "must be a normalized e-mail address to send this e-mail", "e-mail address to send this e-mail to. The system email", "our templates) to CRLF in Python 3. In Python 2,", "= macro.MACRO_EMAIL_ADDRESS message[\"Subject\"] = subject # smtp.sendmail() only converts CR", "will be designated as the sender. \"\"\" message = MIMEText(content.strip())", "an e-mail. `mailto` must be a normalized e-mail address to", "= SMTP(define.config_read_setting('host', \"localhost\", section='smtp')) try: smtp.sendmail( from_addr=macro.MACRO_EMAIL_ADDRESS, to_addrs=[mailto], msg=msg_crlf, )", "not EMAIL_ADDRESS.match(address): return None local, domain = address.split(\"@\", 1) return", "import MIMEText from smtplib import SMTP from weasyl import define,", "def send(mailto, subject, content): \"\"\"Send an e-mail. `mailto` must be", "CR and LF (produced by MIMEText and our templates) to", "this: msg_crlf = re.sub(r\"\\r\\n|[\\r\\n]\", \"\\r\\n\", message.as_string()) smtp = SMTP(define.config_read_setting('host', \"localhost\",", "import SMTP from weasyl import define, macro EMAIL_ADDRESS = re.compile(r\"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+\\Z\")", "EMAIL_ADDRESS.match(address): return None local, domain = address.split(\"@\", 1) return \"%s@%s\"", "a normalized e-mail address to send this e-mail to. The", "email will be designated as the sender. \"\"\" message =", "\"\"\" Converts an e-mail address to a consistent representation. Returns", "message[\"From\"] = macro.MACRO_EMAIL_ADDRESS message[\"Subject\"] = subject # smtp.sendmail() only converts", "address to a consistent representation. Returns None if the given", "be a normalized e-mail address to send this e-mail to.", "the sender. \"\"\" message = MIMEText(content.strip()) message[\"To\"] = mailto message[\"From\"]", "and our templates) to CRLF in Python 3. 
In Python", "= re.sub(r\"\\r\\n|[\\r\\n]\", \"\\r\\n\", message.as_string()) smtp = SMTP(define.config_read_setting('host', \"localhost\", section='smtp')) try:", "smtp = SMTP(define.config_read_setting('host', \"localhost\", section='smtp')) try: smtp.sendmail( from_addr=macro.MACRO_EMAIL_ADDRESS, to_addrs=[mailto], msg=msg_crlf,", "return None local, domain = address.split(\"@\", 1) return \"%s@%s\" %", "converts CR and LF (produced by MIMEText and our templates)", "the given address is not considered valid. \"\"\" address =", "macro EMAIL_ADDRESS = re.compile(r\"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+\\Z\") def normalize_address(address): \"\"\" Converts an e-mail", "2, we need this: msg_crlf = re.sub(r\"\\r\\n|[\\r\\n]\", \"\\r\\n\", message.as_string()) smtp", "section='smtp')) try: smtp.sendmail( from_addr=macro.MACRO_EMAIL_ADDRESS, to_addrs=[mailto], msg=msg_crlf, ) finally: smtp.quit() define.metric('increment',", "from email.mime.text import MIMEText from smtplib import SMTP from weasyl", "to a consistent representation. Returns None if the given address", "define, macro EMAIL_ADDRESS = re.compile(r\"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+\\Z\") def normalize_address(address): \"\"\" Converts an", "\"\"\"Send an e-mail. `mailto` must be a normalized e-mail address", "valid. \"\"\" address = address.strip() if not EMAIL_ADDRESS.match(address): return None", "consistent representation. Returns None if the given address is not", "__future__ import absolute_import import re from email.mime.text import MIMEText from", "<filename>weasyl/emailer.py from __future__ import absolute_import import re from email.mime.text import", "message[\"To\"] = mailto message[\"From\"] = macro.MACRO_EMAIL_ADDRESS message[\"Subject\"] = subject #", "1) return \"%s@%s\" % (local, domain.lower()) def send(mailto, subject, content):", "only converts CR and LF (produced by MIMEText and our", "be designated as the sender. \"\"\" message = MIMEText(content.strip()) message[\"To\"]", "\"%s@%s\" % (local, domain.lower()) def send(mailto, subject, content): \"\"\"Send an", "this e-mail to. The system email will be designated as", "EMAIL_ADDRESS = re.compile(r\"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+\\Z\") def normalize_address(address): \"\"\" Converts an e-mail address", "macro.MACRO_EMAIL_ADDRESS message[\"Subject\"] = subject # smtp.sendmail() only converts CR and", "and LF (produced by MIMEText and our templates) to CRLF", "domain.lower()) def send(mailto, subject, content): \"\"\"Send an e-mail. `mailto` must", "normalize_address(address): \"\"\" Converts an e-mail address to a consistent representation.", "% (local, domain.lower()) def send(mailto, subject, content): \"\"\"Send an e-mail.", "subject # smtp.sendmail() only converts CR and LF (produced by", "if not EMAIL_ADDRESS.match(address): return None local, domain = address.split(\"@\", 1)", "address = address.strip() if not EMAIL_ADDRESS.match(address): return None local, domain", "system email will be designated as the sender. 
\"\"\" message", "from weasyl import define, macro EMAIL_ADDRESS = re.compile(r\"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+\\Z\") def normalize_address(address):", "= re.compile(r\"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+\\Z\") def normalize_address(address): \"\"\" Converts an e-mail address to", "re.compile(r\"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+\\Z\") def normalize_address(address): \"\"\" Converts an e-mail address to a", "domain = address.split(\"@\", 1) return \"%s@%s\" % (local, domain.lower()) def", "MIMEText from smtplib import SMTP from weasyl import define, macro", "SMTP from weasyl import define, macro EMAIL_ADDRESS = re.compile(r\"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+\\Z\") def", "a consistent representation. Returns None if the given address is", "def normalize_address(address): \"\"\" Converts an e-mail address to a consistent", "considered valid. \"\"\" address = address.strip() if not EMAIL_ADDRESS.match(address): return", "address.split(\"@\", 1) return \"%s@%s\" % (local, domain.lower()) def send(mailto, subject,", "message.as_string()) smtp = SMTP(define.config_read_setting('host', \"localhost\", section='smtp')) try: smtp.sendmail( from_addr=macro.MACRO_EMAIL_ADDRESS, to_addrs=[mailto],", "Returns None if the given address is not considered valid.", "send(mailto, subject, content): \"\"\"Send an e-mail. `mailto` must be a", "to CRLF in Python 3. In Python 2, we need", "\"\"\" address = address.strip() if not EMAIL_ADDRESS.match(address): return None local,", "e-mail address to a consistent representation. Returns None if the", "= MIMEText(content.strip()) message[\"To\"] = mailto message[\"From\"] = macro.MACRO_EMAIL_ADDRESS message[\"Subject\"] =", "\"\\r\\n\", message.as_string()) smtp = SMTP(define.config_read_setting('host', \"localhost\", section='smtp')) try: smtp.sendmail( from_addr=macro.MACRO_EMAIL_ADDRESS,", "if the given address is not considered valid. \"\"\" address", "is not considered valid. \"\"\" address = address.strip() if not", "in Python 3. In Python 2, we need this: msg_crlf", "SMTP(define.config_read_setting('host', \"localhost\", section='smtp')) try: smtp.sendmail( from_addr=macro.MACRO_EMAIL_ADDRESS, to_addrs=[mailto], msg=msg_crlf, ) finally:", "to send this e-mail to. The system email will be", "message[\"Subject\"] = subject # smtp.sendmail() only converts CR and LF", "(produced by MIMEText and our templates) to CRLF in Python" ]
[ "warning): app.builder.build_all() assert \"build succeeded\" not in status.getvalue() warnings =", "\"build succeeded\" not in status.getvalue() warnings = warning.getvalue().strip() assert 'WARNING:", "Requested child \"http://example.org/system/Missing\" of \"http://example.org/system/ErrorMissingProcess\" is not a Process' in", "rdflib.namespace import RDF, RDFS from sphinx_probs_rdf.directives import PROBS SYS =", "from sphinx_probs_rdf.directives import PROBS SYS = Namespace(\"http://example.org/system/\") @pytest.mark.sphinx( 'probs_rdf', testroot='missing',", "Namespace(\"http://example.org/system/\") @pytest.mark.sphinx( 'probs_rdf', testroot='missing', confoverrides={'probs_rdf_system_prefix': str(SYS)}) def test_builder_reports_warning_for_missing_process(app, status, warning):", "Literal from rdflib.namespace import RDF, RDFS from sphinx_probs_rdf.directives import PROBS", "not in status.getvalue() warnings = warning.getvalue().strip() assert 'WARNING: Requested child", "app.builder.build_all() assert \"build succeeded\" not in status.getvalue() warnings = warning.getvalue().strip()", "Graph, Namespace, Literal from rdflib.namespace import RDF, RDFS from sphinx_probs_rdf.directives", "import RDF, RDFS from sphinx_probs_rdf.directives import PROBS SYS = Namespace(\"http://example.org/system/\")", "import PROBS SYS = Namespace(\"http://example.org/system/\") @pytest.mark.sphinx( 'probs_rdf', testroot='missing', confoverrides={'probs_rdf_system_prefix': str(SYS)})", "= Namespace(\"http://example.org/system/\") @pytest.mark.sphinx( 'probs_rdf', testroot='missing', confoverrides={'probs_rdf_system_prefix': str(SYS)}) def test_builder_reports_warning_for_missing_process(app, status,", "status, warning): app.builder.build_all() assert \"build succeeded\" not in status.getvalue() warnings", "in status.getvalue() warnings = warning.getvalue().strip() assert 'WARNING: Requested child \"http://example.org/system/Missing\"", "warning.getvalue().strip() assert 'WARNING: Requested child \"http://example.org/system/Missing\" of \"http://example.org/system/ErrorMissingProcess\" is not", "from rdflib.namespace import RDF, RDFS from sphinx_probs_rdf.directives import PROBS SYS", "warnings = warning.getvalue().strip() assert 'WARNING: Requested child \"http://example.org/system/Missing\" of \"http://example.org/system/ErrorMissingProcess\"", "testroot='missing', confoverrides={'probs_rdf_system_prefix': str(SYS)}) def test_builder_reports_warning_for_missing_process(app, status, warning): app.builder.build_all() assert \"build", "assert \"build succeeded\" not in status.getvalue() warnings = warning.getvalue().strip() assert", "= warning.getvalue().strip() assert 'WARNING: Requested child \"http://example.org/system/Missing\" of \"http://example.org/system/ErrorMissingProcess\" is", "status.getvalue() warnings = warning.getvalue().strip() assert 'WARNING: Requested child \"http://example.org/system/Missing\" of", "SYS = Namespace(\"http://example.org/system/\") @pytest.mark.sphinx( 'probs_rdf', testroot='missing', confoverrides={'probs_rdf_system_prefix': str(SYS)}) def test_builder_reports_warning_for_missing_process(app,", "@pytest.mark.sphinx( 'probs_rdf', testroot='missing', confoverrides={'probs_rdf_system_prefix': str(SYS)}) def test_builder_reports_warning_for_missing_process(app, status, warning): app.builder.build_all()", "rdflib import Graph, Namespace, Literal from rdflib.namespace import RDF, RDFS", "RDF, RDFS from sphinx_probs_rdf.directives import PROBS SYS = 
Namespace(\"http://example.org/system/\") @pytest.mark.sphinx(", "str(SYS)}) def test_builder_reports_warning_for_missing_process(app, status, warning): app.builder.build_all() assert \"build succeeded\" not", "test_builder_reports_warning_for_missing_process(app, status, warning): app.builder.build_all() assert \"build succeeded\" not in status.getvalue()", "confoverrides={'probs_rdf_system_prefix': str(SYS)}) def test_builder_reports_warning_for_missing_process(app, status, warning): app.builder.build_all() assert \"build succeeded\"", "import pytest from rdflib import Graph, Namespace, Literal from rdflib.namespace", "def test_builder_reports_warning_for_missing_process(app, status, warning): app.builder.build_all() assert \"build succeeded\" not in", "assert 'WARNING: Requested child \"http://example.org/system/Missing\" of \"http://example.org/system/ErrorMissingProcess\" is not a", "Namespace, Literal from rdflib.namespace import RDF, RDFS from sphinx_probs_rdf.directives import", "PROBS SYS = Namespace(\"http://example.org/system/\") @pytest.mark.sphinx( 'probs_rdf', testroot='missing', confoverrides={'probs_rdf_system_prefix': str(SYS)}) def", "RDFS from sphinx_probs_rdf.directives import PROBS SYS = Namespace(\"http://example.org/system/\") @pytest.mark.sphinx( 'probs_rdf',", "succeeded\" not in status.getvalue() warnings = warning.getvalue().strip() assert 'WARNING: Requested", "'WARNING: Requested child \"http://example.org/system/Missing\" of \"http://example.org/system/ErrorMissingProcess\" is not a Process'", "import Graph, Namespace, Literal from rdflib.namespace import RDF, RDFS from", "'probs_rdf', testroot='missing', confoverrides={'probs_rdf_system_prefix': str(SYS)}) def test_builder_reports_warning_for_missing_process(app, status, warning): app.builder.build_all() assert", "sphinx_probs_rdf.directives import PROBS SYS = Namespace(\"http://example.org/system/\") @pytest.mark.sphinx( 'probs_rdf', testroot='missing', confoverrides={'probs_rdf_system_prefix':", "child \"http://example.org/system/Missing\" of \"http://example.org/system/ErrorMissingProcess\" is not a Process' in warnings", "from rdflib import Graph, Namespace, Literal from rdflib.namespace import RDF,", "pytest from rdflib import Graph, Namespace, Literal from rdflib.namespace import", "<filename>tests/test_missing_process.py import pytest from rdflib import Graph, Namespace, Literal from" ]
[ "if analysis=='Yes': levels=['Level 1','Level 2','Level 3'] #calculating the score of", "bar chart plt.bar(levels,level_score,color='blue',edgecolor='black') plt.title('Levelwise Scores',fontsize=16)#add title plt.xlabel('Levels')#set x-axis label plt.ylabel('Scores')#set", "unique values in array along with their counts vals, uni_val_counts", "of game analysis=input('\\nDo you want to see your game analysis?", "numPy array of values with only one mode arr_val =", "analysis? (Yes/No) ') if analysis=='Yes': levels=['Level 1','Level 2','Level 3'] #calculating", "') if analysis=='Yes': levels=['Level 1','Level 2','Level 3'] #calculating the score", "#Mode calculation #create numPy array of values with only one", "Deviation: ',statistics.stdev(level_score)) print('\\nGood Bye.See you later!!!') elif analysis=='No': print('\\nGood Bye.See", "elif analysis=='No': print('\\nGood Bye.See you later!!!') else: print('Invalid value enter')", "#find median value print('\\nMediand: ',statistics.median(level_score)) #Mode calculation #create numPy array", "np.max(uni_val_counts)) print('\\nMode: ',vals[mode_value].flatten().tolist()) #find variance print('\\nVariance: ',np.var(level_score)) #find standard deviation", "y-axis label plt.show() print('\\nDescriptive Statistics of Scores:') #find mean value", "1','Level 2','Level 3'] #calculating the score of levels l1_score= c1*10", "print('\\nMean: ',statistics.mean(level_score)) #find median value print('\\nMediand: ',statistics.median(level_score)) #Mode calculation #create", "counts vals, uni_val_counts = np.unique(arr_val, return_counts=True) #find mode mode_value =", "see your game analysis? (Yes/No) ') if analysis=='Yes': levels=['Level 1','Level", "label plt.show() print('\\nDescriptive Statistics of Scores:') #find mean value print('\\nMean:", "chart plt.bar(levels,level_score,color='blue',edgecolor='black') plt.title('Levelwise Scores',fontsize=16)#add title plt.xlabel('Levels')#set x-axis label plt.ylabel('Scores')#set y-axis", "your game analysis? (Yes/No) ') if analysis=='Yes': levels=['Level 1','Level 2','Level", "calculation #create numPy array of values with only one mode", "game analysis? 
(Yes/No) ') if analysis=='Yes': levels=['Level 1','Level 2','Level 3']", "plt.show() print('\\nDescriptive Statistics of Scores:') #find mean value print('\\nMean: ',statistics.mean(level_score))", "x-axis label plt.ylabel('Scores')#set y-axis label plt.show() print('\\nDescriptive Statistics of Scores:')", "mean value print('\\nMean: ',statistics.mean(level_score)) #find median value print('\\nMediand: ',statistics.median(level_score)) #Mode", "along with their counts vals, uni_val_counts = np.unique(arr_val, return_counts=True) #find", "3'] #calculating the score of levels l1_score= c1*10 l2_score= c2*10", "plt.xlabel('Levels')#set x-axis label plt.ylabel('Scores')#set y-axis label plt.show() print('\\nDescriptive Statistics of", "you later!!!') elif analysis=='No': print('\\nGood Bye.See you later!!!') else: print('Invalid", "print('\\nMode: ',vals[mode_value].flatten().tolist()) #find variance print('\\nVariance: ',np.var(level_score)) #find standard deviation print('\\nStandard", "deviation print('\\nStandard Deviation: ',statistics.stdev(level_score)) print('\\nGood Bye.See you later!!!') elif analysis=='No':", "title plt.xlabel('Levels')#set x-axis label plt.ylabel('Scores')#set y-axis label plt.show() print('\\nDescriptive Statistics", "def stat_analysis(c1,c2,c3): #ask question for viewing analysis of game analysis=input('\\nDo", "median value print('\\nMediand: ',statistics.median(level_score)) #Mode calculation #create numPy array of", "= np.argwhere(counts == np.max(uni_val_counts)) print('\\nMode: ',vals[mode_value].flatten().tolist()) #find variance print('\\nVariance: ',np.var(level_score))", "levels l1_score= c1*10 l2_score= c2*10 l3_score= c3*10 level_score=[l1_score,l2_score,l3_score] #plot bar", "np.array(level_score) #find unique values in array along with their counts", "value print('\\nMean: ',statistics.mean(level_score)) #find median value print('\\nMediand: ',statistics.median(level_score)) #Mode calculation", "plt.title('Levelwise Scores',fontsize=16)#add title plt.xlabel('Levels')#set x-axis label plt.ylabel('Scores')#set y-axis label plt.show()", "Statistics of Scores:') #find mean value print('\\nMean: ',statistics.mean(level_score)) #find median", "level game def stat_analysis(c1,c2,c3): #ask question for viewing analysis of", "want to see your game analysis? 
(Yes/No) ') if analysis=='Yes':", "level_score=[l1_score,l2_score,l3_score] #plot bar chart plt.bar(levels,level_score,color='blue',edgecolor='black') plt.title('Levelwise Scores',fontsize=16)#add title plt.xlabel('Levels')#set x-axis", "= np.unique(arr_val, return_counts=True) #find mode mode_value = np.argwhere(counts == np.max(uni_val_counts))", "#find mode mode_value = np.argwhere(counts == np.max(uni_val_counts)) print('\\nMode: ',vals[mode_value].flatten().tolist()) #find", "<reponame>thekushalpokhrel/Python_Programs_SoftDev_DataAnalysis #analysis function for three level game def stat_analysis(c1,c2,c3): #ask", "c3*10 level_score=[l1_score,l2_score,l3_score] #plot bar chart plt.bar(levels,level_score,color='blue',edgecolor='black') plt.title('Levelwise Scores',fontsize=16)#add title plt.xlabel('Levels')#set", "#ask question for viewing analysis of game analysis=input('\\nDo you want", "mode mode_value = np.argwhere(counts == np.max(uni_val_counts)) print('\\nMode: ',vals[mode_value].flatten().tolist()) #find variance", "three level game def stat_analysis(c1,c2,c3): #ask question for viewing analysis", "stat_analysis(c1,c2,c3): #ask question for viewing analysis of game analysis=input('\\nDo you", "viewing analysis of game analysis=input('\\nDo you want to see your", "to see your game analysis? (Yes/No) ') if analysis=='Yes': levels=['Level", "np.unique(arr_val, return_counts=True) #find mode mode_value = np.argwhere(counts == np.max(uni_val_counts)) print('\\nMode:", "#calculating the score of levels l1_score= c1*10 l2_score= c2*10 l3_score=", "for viewing analysis of game analysis=input('\\nDo you want to see", "game analysis=input('\\nDo you want to see your game analysis? (Yes/No)", "#plot bar chart plt.bar(levels,level_score,color='blue',edgecolor='black') plt.title('Levelwise Scores',fontsize=16)#add title plt.xlabel('Levels')#set x-axis label", "of Scores:') #find mean value print('\\nMean: ',statistics.mean(level_score)) #find median value", "values with only one mode arr_val = np.array(level_score) #find unique", "function for three level game def stat_analysis(c1,c2,c3): #ask question for", "with only one mode arr_val = np.array(level_score) #find unique values", "(Yes/No) ') if analysis=='Yes': levels=['Level 1','Level 2','Level 3'] #calculating the", "Scores:') #find mean value print('\\nMean: ',statistics.mean(level_score)) #find median value print('\\nMediand:", "array of values with only one mode arr_val = np.array(level_score)", "with their counts vals, uni_val_counts = np.unique(arr_val, return_counts=True) #find mode", "analysis=='Yes': levels=['Level 1','Level 2','Level 3'] #calculating the score of levels", "game def stat_analysis(c1,c2,c3): #ask question for viewing analysis of game", "l2_score= c2*10 l3_score= c3*10 level_score=[l1_score,l2_score,l3_score] #plot bar chart plt.bar(levels,level_score,color='blue',edgecolor='black') plt.title('Levelwise", "the score of levels l1_score= c1*10 l2_score= c2*10 l3_score= c3*10", "np.argwhere(counts == np.max(uni_val_counts)) print('\\nMode: ',vals[mode_value].flatten().tolist()) #find variance print('\\nVariance: ',np.var(level_score)) #find", "only one mode arr_val = np.array(level_score) #find unique values in", "#find variance print('\\nVariance: ',np.var(level_score)) #find standard deviation print('\\nStandard Deviation: ',statistics.stdev(level_score))", "',statistics.stdev(level_score)) print('\\nGood Bye.See you later!!!') elif analysis=='No': print('\\nGood Bye.See you", "c1*10 l2_score= c2*10 l3_score= c3*10 
level_score=[l1_score,l2_score,l3_score] #plot bar chart plt.bar(levels,level_score,color='blue',edgecolor='black')", "plt.bar(levels,level_score,color='blue',edgecolor='black') plt.title('Levelwise Scores',fontsize=16)#add title plt.xlabel('Levels')#set x-axis label plt.ylabel('Scores')#set y-axis label", "arr_val = np.array(level_score) #find unique values in array along with", "2','Level 3'] #calculating the score of levels l1_score= c1*10 l2_score=", "their counts vals, uni_val_counts = np.unique(arr_val, return_counts=True) #find mode mode_value", "',vals[mode_value].flatten().tolist()) #find variance print('\\nVariance: ',np.var(level_score)) #find standard deviation print('\\nStandard Deviation:", "variance print('\\nVariance: ',np.var(level_score)) #find standard deviation print('\\nStandard Deviation: ',statistics.stdev(level_score)) print('\\nGood", "l3_score= c3*10 level_score=[l1_score,l2_score,l3_score] #plot bar chart plt.bar(levels,level_score,color='blue',edgecolor='black') plt.title('Levelwise Scores',fontsize=16)#add title", "#find mean value print('\\nMean: ',statistics.mean(level_score)) #find median value print('\\nMediand: ',statistics.median(level_score))", "c2*10 l3_score= c3*10 level_score=[l1_score,l2_score,l3_score] #plot bar chart plt.bar(levels,level_score,color='blue',edgecolor='black') plt.title('Levelwise Scores',fontsize=16)#add", "one mode arr_val = np.array(level_score) #find unique values in array", "values in array along with their counts vals, uni_val_counts =", "analysis=input('\\nDo you want to see your game analysis? (Yes/No) ')", "score of levels l1_score= c1*10 l2_score= c2*10 l3_score= c3*10 level_score=[l1_score,l2_score,l3_score]", "mode arr_val = np.array(level_score) #find unique values in array along", "#create numPy array of values with only one mode arr_val", "print('\\nGood Bye.See you later!!!') elif analysis=='No': print('\\nGood Bye.See you later!!!')", "#find unique values in array along with their counts vals,", "plt.ylabel('Scores')#set y-axis label plt.show() print('\\nDescriptive Statistics of Scores:') #find mean", "= np.array(level_score) #find unique values in array along with their", "of values with only one mode arr_val = np.array(level_score) #find", "mode_value = np.argwhere(counts == np.max(uni_val_counts)) print('\\nMode: ',vals[mode_value].flatten().tolist()) #find variance print('\\nVariance:", "label plt.ylabel('Scores')#set y-axis label plt.show() print('\\nDescriptive Statistics of Scores:') #find", "value print('\\nMediand: ',statistics.median(level_score)) #Mode calculation #create numPy array of values", "analysis=='No': print('\\nGood Bye.See you later!!!') else: print('Invalid value enter') stat_analysis(c1,c2,c3)", "',statistics.mean(level_score)) #find median value print('\\nMediand: ',statistics.median(level_score)) #Mode calculation #create numPy", "',statistics.median(level_score)) #Mode calculation #create numPy array of values with only", "l1_score= c1*10 l2_score= c2*10 l3_score= c3*10 level_score=[l1_score,l2_score,l3_score] #plot bar chart", "question for viewing analysis of game analysis=input('\\nDo you want to", "vals, uni_val_counts = np.unique(arr_val, return_counts=True) #find mode mode_value = np.argwhere(counts", "in array along with their counts vals, uni_val_counts = np.unique(arr_val,", "#find standard deviation print('\\nStandard Deviation: ',statistics.stdev(level_score)) print('\\nGood Bye.See you later!!!')", "#analysis function for three level game def stat_analysis(c1,c2,c3): #ask 
question", "== np.max(uni_val_counts)) print('\\nMode: ',vals[mode_value].flatten().tolist()) #find variance print('\\nVariance: ',np.var(level_score)) #find standard", "Scores',fontsize=16)#add title plt.xlabel('Levels')#set x-axis label plt.ylabel('Scores')#set y-axis label plt.show() print('\\nDescriptive", "you want to see your game analysis? (Yes/No) ') if", "print('\\nStandard Deviation: ',statistics.stdev(level_score)) print('\\nGood Bye.See you later!!!') elif analysis=='No': print('\\nGood", "later!!!') elif analysis=='No': print('\\nGood Bye.See you later!!!') else: print('Invalid value", "standard deviation print('\\nStandard Deviation: ',statistics.stdev(level_score)) print('\\nGood Bye.See you later!!!') elif", "for three level game def stat_analysis(c1,c2,c3): #ask question for viewing", "',np.var(level_score)) #find standard deviation print('\\nStandard Deviation: ',statistics.stdev(level_score)) print('\\nGood Bye.See you", "print('\\nDescriptive Statistics of Scores:') #find mean value print('\\nMean: ',statistics.mean(level_score)) #find", "print('\\nMediand: ',statistics.median(level_score)) #Mode calculation #create numPy array of values with", "levels=['Level 1','Level 2','Level 3'] #calculating the score of levels l1_score=", "of levels l1_score= c1*10 l2_score= c2*10 l3_score= c3*10 level_score=[l1_score,l2_score,l3_score] #plot", "array along with their counts vals, uni_val_counts = np.unique(arr_val, return_counts=True)", "return_counts=True) #find mode mode_value = np.argwhere(counts == np.max(uni_val_counts)) print('\\nMode: ',vals[mode_value].flatten().tolist())", "print('\\nVariance: ',np.var(level_score)) #find standard deviation print('\\nStandard Deviation: ',statistics.stdev(level_score)) print('\\nGood Bye.See", "uni_val_counts = np.unique(arr_val, return_counts=True) #find mode mode_value = np.argwhere(counts ==", "analysis of game analysis=input('\\nDo you want to see your game", "Bye.See you later!!!') elif analysis=='No': print('\\nGood Bye.See you later!!!') else:" ]
[ "= 'mspaint' # replace with another editor if Unix rootdir", "in degrees vcen = (1,0,0) # x y z coords", "2022 by <NAME> | | [<EMAIL>] | |-----------------------------------| | We", "40 # height of cone deganglestep = 5 # how", "= 0 # outline color rotation = rotvec3D(25,240,70) # rotation", "cf = getRGBfactors() # color list color = cf['brightyellow'] #", "[<EMAIL>] | |-----------------------------------| | We make absolutely no warranty |", "0 # outline color rotation = rotvec3D(25,240,70) # rotation vector", "# x y z coords r = 40 # radius", "cf = getRGBfactors() # color info with presets d, translationvector", "= 40 # height of cone deganglestep = 5 #", "with presets d, translationvector = 400, [0, 0, 200] #", "presets d, translationvector = 400, [0, 0, 200] # be", "getRGBfactors, rotvec3D, conevertandsurface, saveBMP ) import subprocess as proc from", "my, 24) # RGB bmp cenpt = centercoord(bmp) # helper", "to get center of a bitmap cf = getRGBfactors() #", "= cf['brightyellow'] # color of solid outlinecolor = 0 #", "= getRGBfactors() # color info with presets d, translationvector =", "of solid outlinecolor = 0 # outline color rotation =", "color rotation = rotvec3D(25,240,70) # rotation vector (x,y,z) in degrees", "script mx = my = 250 # x=y square bmp", "'HelloCone.bmp' # some random file name as string bmp =", "= False # can show outline even if solid cf", "= conevertandsurface(vcen, r, zlen, deganglestep)# A solid is defined by", "any kind, expressed or implied | |-----------------------------------| | This graphics", "deganglestep)# A solid is defined by vertices and surfaces plot3Dsolid(bmp,", "400, [0, 0, 200] # be careful with these variables", "isSolid, color, showoutline, outlinecolor, rotation, translationvector, d, cenpt) saveBMP(file, bmp)", "radius of cone zlen = 40 # height of cone", "| This graphics library outputs | | to a bitmap", "color of solid outlinecolor = 0 # outline color rotation", "graphics library outputs | | to a bitmap file. |", "| We make absolutely no warranty | | of any", "file. | ----------------------------------- \"\"\" from Python_BMP.BITMAPlib import( newBMP, centercoord, plot3Dsolid,", "y z coords r = 40 # radius of cone", "# toggle solid or outline showoutline = False # can", "outlinecolor = 0 # outline color rotation = rotvec3D(25,240,70) #", "(file, rootdir, imgedt)) ret = proc.call([imgedt, file]) if __name__==\"__main__\": main()", "around the cone obj3D = conevertandsurface(vcen, r, zlen, deganglestep)# A", "# x=y square bmp file = 'HelloCone.bmp' # some random", "# be careful with these variables or object goes offscreen", "Copyright 2022 by <NAME> | | [<EMAIL>] | |-----------------------------------| |", "|-----------------------------------| | We make absolutely no warranty | | of", "be careful with these variables or object goes offscreen isSolid", "= (1,0,0) # x y z coords r = 40", "bmp = newBMP(mx, my, 24) # RGB bmp cenpt =", "rootdir = path.dirname(__file__) # get path of this script mx", "careful with these variables or object goes offscreen isSolid =", "some random file name as string bmp = newBMP(mx, my,", "bitmap file. 
| ----------------------------------- \"\"\" from Python_BMP.BITMAPlib import( newBMP, centercoord,", "solid cf = getRGBfactors() # color list color = cf['brightyellow']", "solid outlinecolor = 0 # outline color rotation = rotvec3D(25,240,70)", "= getRGBfactors() # color list color = cf['brightyellow'] # color", "| [<EMAIL>] | |-----------------------------------| | We make absolutely no warranty", "surfaces around the cone obj3D = conevertandsurface(vcen, r, zlen, deganglestep)#", "# helper method to get center of a bitmap cf", "import( newBMP, centercoord, plot3Dsolid, getRGBfactors, rotvec3D, conevertandsurface, saveBMP ) import", "cone zlen = 40 # height of cone deganglestep =", "getRGBfactors() # color list color = cf['brightyellow'] # color of", "centercoord(bmp) # helper method to get center of a bitmap", "string bmp = newBMP(mx, my, 24) # RGB bmp cenpt", "tile flat surfaces around the cone obj3D = conevertandsurface(vcen, r,", "This graphics library outputs | | to a bitmap file.", "bmp cenpt = centercoord(bmp) # helper method to get center", "of this script mx = my = 250 # x=y", "# some random file name as string bmp = newBMP(mx,", "A solid is defined by vertices and surfaces plot3Dsolid(bmp, obj3D,", "[0, 0, 200] # be careful with these variables or", "| |-----------------------------------| | This graphics library outputs | | to", "= my = 250 # x=y square bmp file =", "|-----------------------------------| | This graphics library outputs | | to a", "Unix rootdir = path.dirname(__file__) # get path of this script", "= 'HelloCone.bmp' # some random file name as string bmp", "of a bitmap cf = getRGBfactors() # color info with", "plot3Dsolid, getRGBfactors, rotvec3D, conevertandsurface, saveBMP ) import subprocess as proc", "200] # be careful with these variables or object goes", "r = 40 # radius of cone zlen = 40", "Cone Demo ----------------------------------- | Copyright 2022 by <NAME> | |", "library outputs | | to a bitmap file. 
| -----------------------------------", "# RGB bmp cenpt = centercoord(bmp) # helper method to", "import path def main(): print(notice) imgedt = 'mspaint' # replace", "conevertandsurface, saveBMP ) import subprocess as proc from os import", "color info with presets d, translationvector = 400, [0, 0,", "vcen = (1,0,0) # x y z coords r =", "z coords r = 40 # radius of cone zlen", "saveBMP(file, bmp) # save file print('Saved to %s in %s\\nAll", "notice = \"\"\" Cone Demo ----------------------------------- | Copyright 2022 by", "height of cone deganglestep = 5 # how finely we", "as string bmp = newBMP(mx, my, 24) # RGB bmp", "solid or outline showoutline = False # can show outline", "= 40 # radius of cone zlen = 40 #", "'mspaint' # replace with another editor if Unix rootdir =", "# height of cone deganglestep = 5 # how finely", "d, cenpt) saveBMP(file, bmp) # save file print('Saved to %s", "replace with another editor if Unix rootdir = path.dirname(__file__) #", "toggle solid or outline showoutline = False # can show", "cone deganglestep = 5 # how finely we tile flat", "| | of any kind, expressed or implied | |-----------------------------------|", "# can show outline even if solid cf = getRGBfactors()", "offscreen isSolid = True # toggle solid or outline showoutline", "We make absolutely no warranty | | of any kind,", "kind, expressed or implied | |-----------------------------------| | This graphics library", "plot3Dsolid(bmp, obj3D, isSolid, color, showoutline, outlinecolor, rotation, translationvector, d, cenpt)", "get center of a bitmap cf = getRGBfactors() # color", "Demo ----------------------------------- | Copyright 2022 by <NAME> | | [<EMAIL>]", "\"\"\" Cone Demo ----------------------------------- | Copyright 2022 by <NAME> |", "| | to a bitmap file. | ----------------------------------- \"\"\" from", "even if solid cf = getRGBfactors() # color list color", "40 # radius of cone zlen = 40 # height", "| | [<EMAIL>] | |-----------------------------------| | We make absolutely no", "a bitmap file. 
| ----------------------------------- \"\"\" from Python_BMP.BITMAPlib import( newBMP,", "# radius of cone zlen = 40 # height of", "with another editor if Unix rootdir = path.dirname(__file__) # get", "surfaces plot3Dsolid(bmp, obj3D, isSolid, color, showoutline, outlinecolor, rotation, translationvector, d,", "in %s\\nAll done close %s to finish' % \\ (file,", "% \\ (file, rootdir, imgedt)) ret = proc.call([imgedt, file]) if", "| |-----------------------------------| | We make absolutely no warranty | |", "# color of solid outlinecolor = 0 # outline color", "my = 250 # x=y square bmp file = 'HelloCone.bmp'", "x y z coords r = 40 # radius of", "= 250 # x=y square bmp file = 'HelloCone.bmp' #", "or object goes offscreen isSolid = True # toggle solid", "object goes offscreen isSolid = True # toggle solid or", "showoutline, outlinecolor, rotation, translationvector, d, cenpt) saveBMP(file, bmp) # save", "done close %s to finish' % \\ (file, rootdir, imgedt))", "from Python_BMP.BITMAPlib import( newBMP, centercoord, plot3Dsolid, getRGBfactors, rotvec3D, conevertandsurface, saveBMP", "def main(): print(notice) imgedt = 'mspaint' # replace with another", "solid is defined by vertices and surfaces plot3Dsolid(bmp, obj3D, isSolid,", "24) # RGB bmp cenpt = centercoord(bmp) # helper method", "these variables or object goes offscreen isSolid = True #", "make absolutely no warranty | | of any kind, expressed", "finely we tile flat surfaces around the cone obj3D =", "translationvector, d, cenpt) saveBMP(file, bmp) # save file print('Saved to", "= \"\"\" Cone Demo ----------------------------------- | Copyright 2022 by <NAME>", "translationvector = 400, [0, 0, 200] # be careful with", "vector (x,y,z) in degrees vcen = (1,0,0) # x y", "%s to finish' % \\ (file, rootdir, imgedt)) ret =", "of any kind, expressed or implied | |-----------------------------------| | This", "cf['brightyellow'] # color of solid outlinecolor = 0 # outline", "os import path def main(): print(notice) imgedt = 'mspaint' #", "can show outline even if solid cf = getRGBfactors() #", "list color = cf['brightyellow'] # color of solid outlinecolor =", "| of any kind, expressed or implied | |-----------------------------------| |", "cenpt = centercoord(bmp) # helper method to get center of", "show outline even if solid cf = getRGBfactors() # color", "<NAME> | | [<EMAIL>] | |-----------------------------------| | We make absolutely", "print(notice) imgedt = 'mspaint' # replace with another editor if", "variables or object goes offscreen isSolid = True # toggle", "path.dirname(__file__) # get path of this script mx = my", "(1,0,0) # x y z coords r = 40 #", "zlen, deganglestep)# A solid is defined by vertices and surfaces", "%s in %s\\nAll done close %s to finish' % \\", "deganglestep = 5 # how finely we tile flat surfaces", "or implied | |-----------------------------------| | This graphics library outputs |", "newBMP, centercoord, plot3Dsolid, getRGBfactors, rotvec3D, conevertandsurface, saveBMP ) import subprocess", ") import subprocess as proc from os import path def", "a bitmap cf = getRGBfactors() # color info with presets", "outline showoutline = False # can show outline even if", "250 # x=y square bmp file = 'HelloCone.bmp' # some", "isSolid = True # toggle solid or outline showoutline =", "showoutline = False # can show outline even if solid", "square bmp file = 'HelloCone.bmp' # some random file name", "5 # how finely we tile flat surfaces around the", "by vertices and surfaces plot3Dsolid(bmp, obj3D, isSolid, color, showoutline, 
outlinecolor,", "\\ (file, rootdir, imgedt)) ret = proc.call([imgedt, file]) if __name__==\"__main__\":", "| ----------------------------------- \"\"\" from Python_BMP.BITMAPlib import( newBMP, centercoord, plot3Dsolid, getRGBfactors,", "| to a bitmap file. | ----------------------------------- \"\"\" from Python_BMP.BITMAPlib", "saveBMP ) import subprocess as proc from os import path", "proc from os import path def main(): print(notice) imgedt =", "x=y square bmp file = 'HelloCone.bmp' # some random file", "True # toggle solid or outline showoutline = False #", "# how finely we tile flat surfaces around the cone", "close %s to finish' % \\ (file, rootdir, imgedt)) ret", "another editor if Unix rootdir = path.dirname(__file__) # get path", "as proc from os import path def main(): print(notice) imgedt", "rotation = rotvec3D(25,240,70) # rotation vector (x,y,z) in degrees vcen", "----------------------------------- \"\"\" from Python_BMP.BITMAPlib import( newBMP, centercoord, plot3Dsolid, getRGBfactors, rotvec3D,", "defined by vertices and surfaces plot3Dsolid(bmp, obj3D, isSolid, color, showoutline,", "| Copyright 2022 by <NAME> | | [<EMAIL>] | |-----------------------------------|", "obj3D, isSolid, color, showoutline, outlinecolor, rotation, translationvector, d, cenpt) saveBMP(file,", "finish' % \\ (file, rootdir, imgedt)) ret = proc.call([imgedt, file])", "degrees vcen = (1,0,0) # x y z coords r", "= newBMP(mx, my, 24) # RGB bmp cenpt = centercoord(bmp)", "(x,y,z) in degrees vcen = (1,0,0) # x y z", "of cone zlen = 40 # height of cone deganglestep", "outlinecolor, rotation, translationvector, d, cenpt) saveBMP(file, bmp) # save file", "print('Saved to %s in %s\\nAll done close %s to finish'", "= rotvec3D(25,240,70) # rotation vector (x,y,z) in degrees vcen =", "rotation vector (x,y,z) in degrees vcen = (1,0,0) # x", "info with presets d, translationvector = 400, [0, 0, 200]", "\"\"\" from Python_BMP.BITMAPlib import( newBMP, centercoord, plot3Dsolid, getRGBfactors, rotvec3D, conevertandsurface,", "path of this script mx = my = 250 #", "with these variables or object goes offscreen isSolid = True", "the cone obj3D = conevertandsurface(vcen, r, zlen, deganglestep)# A solid", "cenpt) saveBMP(file, bmp) # save file print('Saved to %s in", "no warranty | | of any kind, expressed or implied", "save file print('Saved to %s in %s\\nAll done close %s", "to finish' % \\ (file, rootdir, imgedt)) ret = proc.call([imgedt,", "color list color = cf['brightyellow'] # color of solid outlinecolor", "rotation, translationvector, d, cenpt) saveBMP(file, bmp) # save file print('Saved", "rotvec3D, conevertandsurface, saveBMP ) import subprocess as proc from os", "getRGBfactors() # color info with presets d, translationvector = 400,", "path def main(): print(notice) imgedt = 'mspaint' # replace with", "file print('Saved to %s in %s\\nAll done close %s to", "# color info with presets d, translationvector = 400, [0,", "d, translationvector = 400, [0, 0, 200] # be careful", "and surfaces plot3Dsolid(bmp, obj3D, isSolid, color, showoutline, outlinecolor, rotation, translationvector,", "# save file print('Saved to %s in %s\\nAll done close", "vertices and surfaces plot3Dsolid(bmp, obj3D, isSolid, color, showoutline, outlinecolor, rotation,", "r, zlen, deganglestep)# A solid is defined by vertices and", "# rotation vector (x,y,z) in degrees vcen = (1,0,0) #", "if Unix rootdir = path.dirname(__file__) # get path of this", "= 400, [0, 0, 200] # be careful with these", "----------------------------------- | Copyright 
2022 by <NAME> | | [<EMAIL>] |", "file = 'HelloCone.bmp' # some random file name as string", "goes offscreen isSolid = True # toggle solid or outline", "outline color rotation = rotvec3D(25,240,70) # rotation vector (x,y,z) in", "we tile flat surfaces around the cone obj3D = conevertandsurface(vcen,", "if solid cf = getRGBfactors() # color list color =", "color, showoutline, outlinecolor, rotation, translationvector, d, cenpt) saveBMP(file, bmp) #", "absolutely no warranty | | of any kind, expressed or", "to %s in %s\\nAll done close %s to finish' %", "# get path of this script mx = my =", "# replace with another editor if Unix rootdir = path.dirname(__file__)", "from os import path def main(): print(notice) imgedt = 'mspaint'", "flat surfaces around the cone obj3D = conevertandsurface(vcen, r, zlen,", "mx = my = 250 # x=y square bmp file", "random file name as string bmp = newBMP(mx, my, 24)", "zlen = 40 # height of cone deganglestep = 5", "= True # toggle solid or outline showoutline = False", "rotvec3D(25,240,70) # rotation vector (x,y,z) in degrees vcen = (1,0,0)", "# color list color = cf['brightyellow'] # color of solid", "subprocess as proc from os import path def main(): print(notice)", "this script mx = my = 250 # x=y square", "import subprocess as proc from os import path def main():", "main(): print(notice) imgedt = 'mspaint' # replace with another editor", "False # can show outline even if solid cf =", "%s\\nAll done close %s to finish' % \\ (file, rootdir,", "cone obj3D = conevertandsurface(vcen, r, zlen, deganglestep)# A solid is", "is defined by vertices and surfaces plot3Dsolid(bmp, obj3D, isSolid, color,", "by <NAME> | | [<EMAIL>] | |-----------------------------------| | We make", "method to get center of a bitmap cf = getRGBfactors()", "how finely we tile flat surfaces around the cone obj3D", "warranty | | of any kind, expressed or implied |", "bitmap cf = getRGBfactors() # color info with presets d,", "centercoord, plot3Dsolid, getRGBfactors, rotvec3D, conevertandsurface, saveBMP ) import subprocess as", "= path.dirname(__file__) # get path of this script mx =", "color = cf['brightyellow'] # color of solid outlinecolor = 0", "coords r = 40 # radius of cone zlen =", "bmp file = 'HelloCone.bmp' # some random file name as", "center of a bitmap cf = getRGBfactors() # color info", "implied | |-----------------------------------| | This graphics library outputs | |", "or outline showoutline = False # can show outline even", "of cone deganglestep = 5 # how finely we tile", "outputs | | to a bitmap file. | ----------------------------------- \"\"\"", "file name as string bmp = newBMP(mx, my, 24) #", "obj3D = conevertandsurface(vcen, r, zlen, deganglestep)# A solid is defined", "editor if Unix rootdir = path.dirname(__file__) # get path of", "Python_BMP.BITMAPlib import( newBMP, centercoord, plot3Dsolid, getRGBfactors, rotvec3D, conevertandsurface, saveBMP )", "imgedt = 'mspaint' # replace with another editor if Unix", "0, 200] # be careful with these variables or object", "outline even if solid cf = getRGBfactors() # color list", "conevertandsurface(vcen, r, zlen, deganglestep)# A solid is defined by vertices", "# outline color rotation = rotvec3D(25,240,70) # rotation vector (x,y,z)", "name as string bmp = newBMP(mx, my, 24) # RGB", "helper method to get center of a bitmap cf =", "bmp) # save file print('Saved to %s in %s\\nAll done", "RGB bmp cenpt = centercoord(bmp) # helper method to get", "get path of this script mx = my = 250", "to a bitmap file. 
| ----------------------------------- \"\"\" from Python_BMP.BITMAPlib import(", "= centercoord(bmp) # helper method to get center of a", "expressed or implied | |-----------------------------------| | This graphics library outputs", "= 5 # how finely we tile flat surfaces around", "newBMP(mx, my, 24) # RGB bmp cenpt = centercoord(bmp) #" ]
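The final `proc.call([imgedt, file])` assumes a Windows machine with `mspaint` on the PATH, as the inline comment notes. A hedged, platform-aware variant of that one assignment is sketched below; the viewer names are assumptions ('xdg-open' on most Linux desktops, 'open' on macOS), not part of the original demo.

# Sketch: pick an image opener per platform before calling proc.call.
import sys

if sys.platform.startswith('win'):
    imgedt = 'mspaint'
elif sys.platform == 'darwin':
    imgedt = 'open'
else:
    imgedt = 'xdg-open'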
[ "+= 1 plt.scatter(rmse[:, 0], rmse[:, 1]) plt.yscale('log') plt.ylim([1e-3, 1e-1]) plt.xlim([0,", "import sys sys.path.append(r'/home/andrewkirby72/phd_work/data_synthesis') from GP_machine_learning.GP_machine_learning_functions import * from regular_array_sampling.functions import", "0 while n_train < 200: n_target = 100 +100*n #", "testing points X_test, y_test = create_testing_points_regular(noise) n = 0 n_target", "GP_machine_learning.GP_machine_learning_functions import * from regular_array_sampling.functions import regular_array_monte_carlo # create array", "for plotting rmse = np.ones((25, 2)) noise = 0.01 #", "curve for the 6D data simulator of CT* \"\"\" import", "0 n_target = 0 n_train = 0 while n_train <", "regular_array_monte_carlo # create array to store results for plotting rmse", "array of sampled regular array layouts #cand_points = regular_array_monte_carlo(10000) #", "import matplotlib.pyplot as plt from sklearn.preprocessing import StandardScaler from sklearn.gaussian_process", "create testing points X_test, y_test = create_testing_points_regular(noise) n = 0", "from sklearn.gaussian_process.kernels import RBF, WhiteKernel, Matern from sklearn.metrics import mean_squared_error", "import Pipeline import sys sys.path.append(r'/home/andrewkirby72/phd_work/data_synthesis') from GP_machine_learning.GP_machine_learning_functions import * from", "as plt from sklearn.preprocessing import StandardScaler from sklearn.gaussian_process import GaussianProcessRegressor", "plt.title('Training curve RBF - 6D 1% noise - irregular array", "create array to store results for plotting rmse = np.ones((25,", "GaussianProcessRegressor from sklearn.gaussian_process.kernels import RBF, WhiteKernel, Matern from sklearn.metrics import", "\\ create_training_points_irregular(n_target, noise) # fit GP regression and calculate rmse", "plt.xlim([0, 200]) plt.title('Training curve RBF - 6D 1% noise -", "2 * RBF(length_scale=[1., 1., 1., 1., 1., 1.]) \\ +", "report rmse print(n_train, np.sqrt(mse)) rmse[n, 0] = n_train rmse[n, 1]", "y_test = create_testing_points_regular(noise) n = 0 n_target = 0 n_train", "rmse print(n_train, np.sqrt(mse)) rmse[n, 0] = n_train rmse[n, 1] =", "StandardScaler from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.gaussian_process.kernels import RBF, WhiteKernel,", "a training curve for the 6D data simulator of CT*", "plt from sklearn.preprocessing import StandardScaler from sklearn.gaussian_process import GaussianProcessRegressor from", "fit GP regression and calculate rmse kernel = 1.0 **", "print(n_train, np.sqrt(mse)) rmse[n, 0] = n_train rmse[n, 1] = np.sqrt(mse)", "+100*n # create training points X_train, y_train, n_train = \\", "n += 1 plt.scatter(rmse[:, 0], rmse[:, 1]) plt.yscale('log') plt.ylim([1e-3, 1e-1])", "Pipeline import sys sys.path.append(r'/home/andrewkirby72/phd_work/data_synthesis') from GP_machine_learning.GP_machine_learning_functions import * from regular_array_sampling.functions", "rmse[:, 1]) plt.yscale('log') plt.ylim([1e-3, 1e-1]) plt.xlim([0, 200]) plt.title('Training curve RBF", "as np import matplotlib.pyplot as plt from sklearn.preprocessing import StandardScaler", "curve RBF - 6D 1% noise - irregular array training", "import numpy as np import matplotlib.pyplot as plt from sklearn.preprocessing", "from sklearn.pipeline import Pipeline import sys sys.path.append(r'/home/andrewkirby72/phd_work/data_synthesis') from GP_machine_learning.GP_machine_learning_functions import", "# create training points X_train, y_train, 
n_train = \\ create_training_points_irregular(n_target,", "# create array of sampled regular array layouts #cand_points =", "= n_train rmse[n, 1] = np.sqrt(mse) n += 1 plt.scatter(rmse[:,", "n_target = 100 +100*n # create training points X_train, y_train,", "('gp', GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=20))]) pipe.fit(X_train, y_train) y_predict = pipe.predict(X_test) mse =", "for the 6D data simulator of CT* \"\"\" import numpy", "mean_squared_error from sklearn.pipeline import Pipeline import sys sys.path.append(r'/home/andrewkirby72/phd_work/data_synthesis') from GP_machine_learning.GP_machine_learning_functions", "create array of sampled regular array layouts #cand_points = regular_array_monte_carlo(10000)", "0] = n_train rmse[n, 1] = np.sqrt(mse) n += 1", "1., 1., 1.]) \\ + WhiteKernel(noise_level=1e-5, noise_level_bounds=[1e-10, 1]) pipe =", "plt.ylim([1e-3, 1e-1]) plt.xlim([0, 200]) plt.title('Training curve RBF - 6D 1%", "1]) pipe = Pipeline([('scaler', StandardScaler()), ('gp', GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=20))]) pipe.fit(X_train, y_train)", "= mean_squared_error(y_test, y_predict) # report rmse print(n_train, np.sqrt(mse)) rmse[n, 0]", "matplotlib.pyplot as plt from sklearn.preprocessing import StandardScaler from sklearn.gaussian_process import", "import * from regular_array_sampling.functions import regular_array_monte_carlo # create array to", "sklearn.pipeline import Pipeline import sys sys.path.append(r'/home/andrewkirby72/phd_work/data_synthesis') from GP_machine_learning.GP_machine_learning_functions import *", "regular_array_monte_carlo(10000) # create testing points X_test, y_test = create_testing_points_regular(noise) n", "while n_train < 200: n_target = 100 +100*n # create", "GP regression and calculate rmse kernel = 1.0 ** 2", "pipe.predict(X_test) mse = mean_squared_error(y_test, y_predict) # report rmse print(n_train, np.sqrt(mse))", "training - max change halved') plt.ylabel('RMSE') plt.xlabel('Training points') plt.savefig('analysis/GP_machine_learning_plots/\\ gp_training_curve_RBF_irregular_training_maxchangehalved_regular_testing.png')", "import RBF, WhiteKernel, Matern from sklearn.metrics import mean_squared_error from sklearn.pipeline", "Matern from sklearn.metrics import mean_squared_error from sklearn.pipeline import Pipeline import", "create_testing_points_regular(noise) n = 0 n_target = 0 n_train = 0", "irregular array training - max change halved') plt.ylabel('RMSE') plt.xlabel('Training points')", "np import matplotlib.pyplot as plt from sklearn.preprocessing import StandardScaler from", "1] = np.sqrt(mse) n += 1 plt.scatter(rmse[:, 0], rmse[:, 1])", "import mean_squared_error from sklearn.pipeline import Pipeline import sys sys.path.append(r'/home/andrewkirby72/phd_work/data_synthesis') from", "n_train rmse[n, 1] = np.sqrt(mse) n += 1 plt.scatter(rmse[:, 0],", "1 plt.scatter(rmse[:, 0], rmse[:, 1]) plt.yscale('log') plt.ylim([1e-3, 1e-1]) plt.xlim([0, 200])", "noise = 0.01 # create array of sampled regular array", "sys sys.path.append(r'/home/andrewkirby72/phd_work/data_synthesis') from GP_machine_learning.GP_machine_learning_functions import * from regular_array_sampling.functions import regular_array_monte_carlo", "WhiteKernel(noise_level=1e-5, noise_level_bounds=[1e-10, 1]) pipe = Pipeline([('scaler', StandardScaler()), ('gp', GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=20))])", "RBF, WhiteKernel, Matern from sklearn.metrics import mean_squared_error from sklearn.pipeline 
import", "from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.gaussian_process.kernels import RBF, WhiteKernel, Matern", "X_train, y_train, n_train = \\ create_training_points_irregular(n_target, noise) # fit GP", "plt.yscale('log') plt.ylim([1e-3, 1e-1]) plt.xlim([0, 200]) plt.title('Training curve RBF - 6D", "from regular_array_sampling.functions import regular_array_monte_carlo # create array to store results", "array training - max change halved') plt.ylabel('RMSE') plt.xlabel('Training points') plt.savefig('analysis/GP_machine_learning_plots/\\", "+ WhiteKernel(noise_level=1e-5, noise_level_bounds=[1e-10, 1]) pipe = Pipeline([('scaler', StandardScaler()), ('gp', GaussianProcessRegressor(kernel=kernel,", "- irregular array training - max change halved') plt.ylabel('RMSE') plt.xlabel('Training", "simulator of CT* \"\"\" import numpy as np import matplotlib.pyplot", "0 n_train = 0 while n_train < 200: n_target =", "= \\ create_training_points_irregular(n_target, noise) # fit GP regression and calculate", "calculate rmse kernel = 1.0 ** 2 * RBF(length_scale=[1., 1.,", "create training points X_train, y_train, n_train = \\ create_training_points_irregular(n_target, noise)", "training points X_train, y_train, n_train = \\ create_training_points_irregular(n_target, noise) #", "create_training_points_irregular(n_target, noise) # fit GP regression and calculate rmse kernel", "StandardScaler()), ('gp', GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=20))]) pipe.fit(X_train, y_train) y_predict = pipe.predict(X_test) mse", "rmse = np.ones((25, 2)) noise = 0.01 # create array", "# create array to store results for plotting rmse =", "1.0 ** 2 * RBF(length_scale=[1., 1., 1., 1., 1., 1.])", "< 200: n_target = 100 +100*n # create training points", "1., 1.]) \\ + WhiteKernel(noise_level=1e-5, noise_level_bounds=[1e-10, 1]) pipe = Pipeline([('scaler',", "layouts #cand_points = regular_array_monte_carlo(10000) # create testing points X_test, y_test", "y_predict = pipe.predict(X_test) mse = mean_squared_error(y_test, y_predict) # report rmse", "RBF(length_scale=[1., 1., 1., 1., 1., 1.]) \\ + WhiteKernel(noise_level=1e-5, noise_level_bounds=[1e-10,", "200]) plt.title('Training curve RBF - 6D 1% noise - irregular", "# create testing points X_test, y_test = create_testing_points_regular(noise) n =", "0.01 # create array of sampled regular array layouts #cand_points", "data simulator of CT* \"\"\" import numpy as np import", "noise) # fit GP regression and calculate rmse kernel =", "from sklearn.metrics import mean_squared_error from sklearn.pipeline import Pipeline import sys", "of sampled regular array layouts #cand_points = regular_array_monte_carlo(10000) # create", "2)) noise = 0.01 # create array of sampled regular", "y_predict) # report rmse print(n_train, np.sqrt(mse)) rmse[n, 0] = n_train", "= 0 while n_train < 200: n_target = 100 +100*n", "0], rmse[:, 1]) plt.yscale('log') plt.ylim([1e-3, 1e-1]) plt.xlim([0, 200]) plt.title('Training curve", "n = 0 n_target = 0 n_train = 0 while", "training curve for the 6D data simulator of CT* \"\"\"", "= 0 n_train = 0 while n_train < 200: n_target", "kernel = 1.0 ** 2 * RBF(length_scale=[1., 1., 1., 1.,", "import GaussianProcessRegressor from sklearn.gaussian_process.kernels import RBF, WhiteKernel, Matern from sklearn.metrics", "and calculate rmse kernel = 1.0 ** 2 * RBF(length_scale=[1.,", "100 +100*n # create training points X_train, y_train, n_train =", "numpy as np import matplotlib.pyplot as plt from sklearn.preprocessing 
import", "1]) plt.yscale('log') plt.ylim([1e-3, 1e-1]) plt.xlim([0, 200]) plt.title('Training curve RBF -", "sys.path.append(r'/home/andrewkirby72/phd_work/data_synthesis') from GP_machine_learning.GP_machine_learning_functions import * from regular_array_sampling.functions import regular_array_monte_carlo #", "import StandardScaler from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.gaussian_process.kernels import RBF,", "y_train, n_train = \\ create_training_points_irregular(n_target, noise) # fit GP regression", "\\ + WhiteKernel(noise_level=1e-5, noise_level_bounds=[1e-10, 1]) pipe = Pipeline([('scaler', StandardScaler()), ('gp',", "array to store results for plotting rmse = np.ones((25, 2))", "Plot a training curve for the 6D data simulator of", "6D data simulator of CT* \"\"\" import numpy as np", "n_target = 0 n_train = 0 while n_train < 200:", "plotting rmse = np.ones((25, 2)) noise = 0.01 # create", "points X_train, y_train, n_train = \\ create_training_points_irregular(n_target, noise) # fit", "import regular_array_monte_carlo # create array to store results for plotting", "* from regular_array_sampling.functions import regular_array_monte_carlo # create array to store", "pipe = Pipeline([('scaler', StandardScaler()), ('gp', GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=20))]) pipe.fit(X_train, y_train) y_predict", "store results for plotting rmse = np.ones((25, 2)) noise =", "= regular_array_monte_carlo(10000) # create testing points X_test, y_test = create_testing_points_regular(noise)", "1% noise - irregular array training - max change halved')", "to store results for plotting rmse = np.ones((25, 2)) noise", "np.sqrt(mse)) rmse[n, 0] = n_train rmse[n, 1] = np.sqrt(mse) n", "noise_level_bounds=[1e-10, 1]) pipe = Pipeline([('scaler', StandardScaler()), ('gp', GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=20))]) pipe.fit(X_train,", "# report rmse print(n_train, np.sqrt(mse)) rmse[n, 0] = n_train rmse[n,", "noise - irregular array training - max change halved') plt.ylabel('RMSE')", "6D 1% noise - irregular array training - max change", "= np.ones((25, 2)) noise = 0.01 # create array of", "* RBF(length_scale=[1., 1., 1., 1., 1., 1.]) \\ + WhiteKernel(noise_level=1e-5,", "WhiteKernel, Matern from sklearn.metrics import mean_squared_error from sklearn.pipeline import Pipeline", "plt.scatter(rmse[:, 0], rmse[:, 1]) plt.yscale('log') plt.ylim([1e-3, 1e-1]) plt.xlim([0, 200]) plt.title('Training", "rmse[n, 1] = np.sqrt(mse) n += 1 plt.scatter(rmse[:, 0], rmse[:,", "pipe.fit(X_train, y_train) y_predict = pipe.predict(X_test) mse = mean_squared_error(y_test, y_predict) #", "1., 1., 1., 1.]) \\ + WhiteKernel(noise_level=1e-5, noise_level_bounds=[1e-10, 1]) pipe", "= 100 +100*n # create training points X_train, y_train, n_train", "200: n_target = 100 +100*n # create training points X_train,", "** 2 * RBF(length_scale=[1., 1., 1., 1., 1., 1.]) \\", "1e-1]) plt.xlim([0, 200]) plt.title('Training curve RBF - 6D 1% noise", "= 1.0 ** 2 * RBF(length_scale=[1., 1., 1., 1., 1.,", "sklearn.gaussian_process import GaussianProcessRegressor from sklearn.gaussian_process.kernels import RBF, WhiteKernel, Matern from", "= 0 n_target = 0 n_train = 0 while n_train", "n_train < 200: n_target = 100 +100*n # create training", "= pipe.predict(X_test) mse = mean_squared_error(y_test, y_predict) # report rmse print(n_train,", "\"\"\" import numpy as np import matplotlib.pyplot as plt from", "array layouts #cand_points = regular_array_monte_carlo(10000) # create 
testing points X_test,", "rmse kernel = 1.0 ** 2 * RBF(length_scale=[1., 1., 1.,", "= np.sqrt(mse) n += 1 plt.scatter(rmse[:, 0], rmse[:, 1]) plt.yscale('log')", "RBF - 6D 1% noise - irregular array training -", "n_train = 0 while n_train < 200: n_target = 100", "mse = mean_squared_error(y_test, y_predict) # report rmse print(n_train, np.sqrt(mse)) rmse[n,", "y_train) y_predict = pipe.predict(X_test) mse = mean_squared_error(y_test, y_predict) # report", "<reponame>AndrewKirby2/data_synthesis \"\"\" Plot a training curve for the 6D data", "of CT* \"\"\" import numpy as np import matplotlib.pyplot as", "X_test, y_test = create_testing_points_regular(noise) n = 0 n_target = 0", "n_restarts_optimizer=20))]) pipe.fit(X_train, y_train) y_predict = pipe.predict(X_test) mse = mean_squared_error(y_test, y_predict)", "Pipeline([('scaler', StandardScaler()), ('gp', GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=20))]) pipe.fit(X_train, y_train) y_predict = pipe.predict(X_test)", "sampled regular array layouts #cand_points = regular_array_monte_carlo(10000) # create testing", "from sklearn.preprocessing import StandardScaler from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.gaussian_process.kernels", "regression and calculate rmse kernel = 1.0 ** 2 *", "1., 1., 1., 1., 1.]) \\ + WhiteKernel(noise_level=1e-5, noise_level_bounds=[1e-10, 1])", "GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=20))]) pipe.fit(X_train, y_train) y_predict = pipe.predict(X_test) mse = mean_squared_error(y_test,", "results for plotting rmse = np.ones((25, 2)) noise = 0.01", "mean_squared_error(y_test, y_predict) # report rmse print(n_train, np.sqrt(mse)) rmse[n, 0] =", "rmse[n, 0] = n_train rmse[n, 1] = np.sqrt(mse) n +=", "n_train = \\ create_training_points_irregular(n_target, noise) # fit GP regression and", "1.]) \\ + WhiteKernel(noise_level=1e-5, noise_level_bounds=[1e-10, 1]) pipe = Pipeline([('scaler', StandardScaler()),", "# fit GP regression and calculate rmse kernel = 1.0", "- 6D 1% noise - irregular array training - max", "regular array layouts #cand_points = regular_array_monte_carlo(10000) # create testing points", "the 6D data simulator of CT* \"\"\" import numpy as", "sklearn.gaussian_process.kernels import RBF, WhiteKernel, Matern from sklearn.metrics import mean_squared_error from", "from GP_machine_learning.GP_machine_learning_functions import * from regular_array_sampling.functions import regular_array_monte_carlo # create", "regular_array_sampling.functions import regular_array_monte_carlo # create array to store results for", "sklearn.metrics import mean_squared_error from sklearn.pipeline import Pipeline import sys sys.path.append(r'/home/andrewkirby72/phd_work/data_synthesis')", "sklearn.preprocessing import StandardScaler from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.gaussian_process.kernels import", "points X_test, y_test = create_testing_points_regular(noise) n = 0 n_target =", "np.sqrt(mse) n += 1 plt.scatter(rmse[:, 0], rmse[:, 1]) plt.yscale('log') plt.ylim([1e-3,", "np.ones((25, 2)) noise = 0.01 # create array of sampled", "= create_testing_points_regular(noise) n = 0 n_target = 0 n_train =", "= Pipeline([('scaler', StandardScaler()), ('gp', GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=20))]) pipe.fit(X_train, y_train) y_predict =", "CT* \"\"\" import numpy as np import matplotlib.pyplot as plt", "\"\"\" Plot a training curve for the 6D data simulator", "#cand_points = regular_array_monte_carlo(10000) 
# create testing points X_test, y_test =", "= 0.01 # create array of sampled regular array layouts" ]
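A practical sanity check on a run like this is to inspect the kernel hyperparameters that scikit-learn converged to at each iteration. The snippet below is an add-on sketch using standard scikit-learn attributes, not part of the original script.

# After pipe.fit(...), the fitted GP exposes the optimized kernel and its
# log marginal likelihood, useful for checking the learned length scales.
gp = pipe.named_steps['gp']
print(gp.kernel_)                                    # optimized kernel expression
print(gp.log_marginal_likelihood(gp.kernel_.theta))  # LML at the optimum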
[ "the AC previously. The list is ordered chronologically, from newest", "tquery import get_latest_record from config import * app = Flask(__name__)", "== 'POST': # someone's logging in if not request.form['username'] in", "= db_record[0].strftime(\"%H:%M\") out_record['celsius'] = db_record[1] out_record['fahrenheit'] = int(round(out_record['celsius']*9/5.0 + 32))", "def command_history(): \"\"\"Returns a list of dictionaries, each containing a", "not in app.config['ACTEMPERATURES']: codes['temperature_error'] = True else: codes['temperature_error'] = False", "from commands order by id desc') command_history = [] for", "db.cursor().executescript(f.read()) db.commit() # Auto-open and close DB when serving requests", "close DB when serving requests @app.before_request def before_request(): g.db =", "AC user_mode = request.form['mode'] user_temperature = request.form['temperature'] validation_codes = validate_AC_command(user_mode,", "if request.method == 'POST': # command is being issued to", "redirect(url_for('submit_page')) return render_template('welcome_page.html', commands=command_history(), error=error, last_record=last_record()) @app.route('/submit', methods=['GET', 'POST']) def", "not session.get('username'): abort(401) if request.method == 'POST': # command is", "get_latest_record() out_record = dict() out_record['date'] = db_record[0].strftime(\"%Y-%m-%d\") out_record['time'] = db_record[0].strftime(\"%H:%M\")", "= 'heat' + codes['temperature'] else: command_postfix = codes['temperature'] codes['command'] =", "the sqlite3 database. This function must be imported and executed", "a list of dictionaries, each containing a command issued to", "is being issued to AC user_mode = request.form['mode'] user_temperature =", "Python interpreter before the application is first run.\"\"\" with closing(connect_db())", "== 'off': cmd = 'off' else: cmd = 'cool to", "error = 'password' else: # successful login session['username'] = request.form['username']", "app = Flask(__name__) app.config.from_object(__name__) # DB helper functions def connect_db():", "session['username'] + '!') return redirect(url_for('submit_page')) return render_template('welcome_page.html', commands=command_history(), error=error, last_record=last_record())", "== 'POST': # command is being issued to AC user_mode", "with closing(connect_db()) as db: with app.open_resource('schema.sql', mode='r') as f: db.cursor().executescript(f.read())", "def before_request(): g.db = connect_db() @app.teardown_request def teardown_request(exception): db =", "def last_record(): \"\"\"Returns the last temperature and humidity record data.", "run.\"\"\" with closing(connect_db()) as db: with app.open_resource('schema.sql', mode='r') as f:", "ts, user) values (?, ?, ?)', [validation_codes['command'], datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"), session['username']])", "is not 'off' and user_temperature not in app.config['ACTEMPERATURES']: codes['temperature_error'] =", "+ row[0][4:] elif row[0] == 'off': cmd = 'off' else:", "subprocess.call(['/usr/bin/irsend','SEND_ONCE', 'lgac', validation_codes['command']]) g.db.execute('insert into commands (command, ts, user) values", "cur = g.db.execute('select command, ts, user from commands order by", "user_temperature = request.form['temperature'] validation_codes = validate_AC_command(user_mode, user_temperature) if (validation_codes['mode_error'] or", "g, redirect, url_for, \\ abort, render_template, flash from contextlib import", "= connect_db() @app.teardown_request def 
teardown_request(exception): db = getattr(g, 'db', None)", "(?, ?, ?)', [validation_codes['command'], datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"), session['username']]) g.db.commit() flash('Command submitted')", "import get_latest_record from config import * app = Flask(__name__) app.config.from_object(__name__)", "return codes def command_history(): \"\"\"Returns a list of dictionaries, each", "app.config.from_object(__name__) # DB helper functions def connect_db(): return sqlite3.connect(app.config['DATABASE']) def", "into commands (command, ts, user) values (?, ?, ?)', [validation_codes['command'],", "request.form['mode'] user_temperature = request.form['temperature'] validation_codes = validate_AC_command(user_mode, user_temperature) if (validation_codes['mode_error']", "cmd = 'cool to ' + row[0] command_history.append(dict(command=cmd, ts=row[1], user=row[2]))", "teardown_request(exception): db = getattr(g, 'db', None) if db is not", "flash('Command submitted') return render_template('submit_page.html', commands=command_history(), error=error, last_record=last_record()) @app.route('/logout') def logout():", "to the AC previously. The list is ordered chronologically, from", "and session['username']: return redirect(url_for('submit_page')) error = None if request.method ==", "to ' + row[0] command_history.append(dict(command=cmd, ts=row[1], user=row[2])) return command_history def", "error=error, last_record=last_record()) @app.route('/submit', methods=['GET', 'POST']) def submit_page(): error = None", "a dict with keys ts, fahrenheit, celsius and humidity. \"\"\"", "in app.config['ACTEMPERATURES']: codes['temperature_error'] = True else: codes['temperature_error'] = False if", "session['username'] = request.form['username'] flash('Hi ' + session['username'] + '!') return", "redirect(url_for('welcome_page')) def validate_AC_command(user_mode, user_temperature): \"\"\"Validates and sanitizes user-input command; translates", "each containing a command issued to the AC previously. The", "request.method == 'POST': # command is being issued to AC", "'heat to ' + row[0][4:] elif row[0] == 'off': cmd", "= None if not session.get('username'): abort(401) if request.method == 'POST':", "render_template('welcome_page.html', commands=command_history(), error=error, last_record=last_record()) @app.route('/submit', methods=['GET', 'POST']) def submit_page(): error", "last_record(): \"\"\"Returns the last temperature and humidity record data. 
The", "None if request.method == 'POST': # someone's logging in if", "'off': command_postfix = 'off' elif codes['mode'] == 'heat': command_postfix =", "flash('Hi ' + session['username'] + '!') return redirect(url_for('submit_page')) return render_template('welcome_page.html',", "# successful login session['username'] = request.form['username'] flash('Hi ' + session['username']", "command_postfix = 'off' elif codes['mode'] == 'heat': command_postfix = 'heat'", "+ 32)) out_record['humidity'] = int(round(db_record[2])) return out_record if __name__ ==", "= 'off' elif codes['mode'] == 'heat': command_postfix = 'heat' +", "= [] for row in cur.fetchall(): if row[0][0] == 'h':", "first run.\"\"\" with closing(connect_db()) as db: with app.open_resource('schema.sql', mode='r') as", "db_record[0].strftime(\"%H:%M\") out_record['celsius'] = db_record[1] out_record['fahrenheit'] = int(round(out_record['celsius']*9/5.0 + 32)) out_record['humidity']", "db = getattr(g, 'db', None) if db is not None:", "'lgac', validation_codes['command']]) g.db.execute('insert into commands (command, ts, user) values (?,", "== 'heat': command_postfix = 'heat' + codes['temperature'] else: command_postfix =", "flash('You were logged out') return redirect(url_for('welcome_page')) def validate_AC_command(user_mode, user_temperature): \"\"\"Validates", "mode='r') as f: db.cursor().executescript(f.read()) db.commit() # Auto-open and close DB", "\"\"\"Validates and sanitizes user-input command; translates command into irsend call.\"\"\"", "if user_mode not in app.config['ACMODES']: codes['mode_error'] = True else: codes['mode_error']", "app.config['ACTEMPERATURES']: codes['temperature_error'] = True else: codes['temperature_error'] = False if not", "init_db(): \"\"\"Initializes the sqlite3 database. 
This function must be imported", "out') return redirect(url_for('welcome_page')) def validate_AC_command(user_mode, user_temperature): \"\"\"Validates and sanitizes user-input", "# DB helper functions def connect_db(): return sqlite3.connect(app.config['DATABASE']) def init_db():", "= Flask(__name__) app.config.from_object(__name__) # DB helper functions def connect_db(): return", "redirect(url_for('submit_page')) error = None if request.method == 'POST': # someone's", "be imported and executed from the Python interpreter before the", "from newest to oldest.\"\"\" cur = g.db.execute('select command, ts, user", "' + row[0][4:] elif row[0] == 'off': cmd = 'off'", "error = 'username' elif request.form['password'] != app.config['PASSWORD']: error = 'password'", "ts, user from commands order by id desc') command_history =", "sqlite3 import subprocess, datetime from flask import Flask, request, session,", "list is ordered chronologically, from newest to oldest.\"\"\" cur =", "' + session['username'] + '!') return redirect(url_for('submit_page')) return render_template('welcome_page.html', commands=command_history(),", "ts=row[1], user=row[2])) return command_history def last_record(): \"\"\"Returns the last temperature", "methods=['GET', 'POST']) def submit_page(): error = None if not session.get('username'):", "= command_postfix return codes def command_history(): \"\"\"Returns a list of", "= 'username' elif request.form['password'] != app.config['PASSWORD']: error = 'password' else:", "interpreter before the application is first run.\"\"\" with closing(connect_db()) as", "import Flask, request, session, g, redirect, url_for, \\ abort, render_template,", "command into irsend call.\"\"\" codes = dict() if user_mode not", "False if not codes['mode_error'] and not codes['temperature_error']: codes['mode'] = user_mode", "command_history = [] for row in cur.fetchall(): if row[0][0] ==", "command_history def last_record(): \"\"\"Returns the last temperature and humidity record", "connect_db() @app.teardown_request def teardown_request(exception): db = getattr(g, 'db', None) if", "were logged out') return redirect(url_for('welcome_page')) def validate_AC_command(user_mode, user_temperature): \"\"\"Validates and", "call.\"\"\" codes = dict() if user_mode not in app.config['ACMODES']: codes['mode_error']", "values (?, ?, ?)', [validation_codes['command'], datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"), session['username']]) g.db.commit() flash('Command", "helper functions def connect_db(): return sqlite3.connect(app.config['DATABASE']) def init_db(): \"\"\"Initializes the", "return command_history def last_record(): \"\"\"Returns the last temperature and humidity", "dict with keys ts, fahrenheit, celsius and humidity. 
\"\"\" db_record", "\"\"\" db_record = get_latest_record() out_record = dict() out_record['date'] = db_record[0].strftime(\"%Y-%m-%d\")", "command_postfix = codes['temperature'] codes['command'] = command_postfix return codes def command_history():", "@app.teardown_request def teardown_request(exception): db = getattr(g, 'db', None) if db", "\"\"\"Returns a list of dictionaries, each containing a command issued", "user_mode not in app.config['ACMODES']: codes['mode_error'] = True else: codes['mode_error'] =", "and sanitizes user-input command; translates command into irsend call.\"\"\" codes", "get_latest_record from config import * app = Flask(__name__) app.config.from_object(__name__) #", "the Python interpreter before the application is first run.\"\"\" with", "app.config['USERNAMES']: error = 'username' elif request.form['password'] != app.config['PASSWORD']: error =", "render_template, flash from contextlib import closing from tquery import get_latest_record", "before the application is first run.\"\"\" with closing(connect_db()) as db:", "= validate_AC_command(user_mode, user_temperature) if (validation_codes['mode_error'] or validation_codes['temperature_error']): error=validation_codes else: subprocess.call(['/usr/bin/irsend','SEND_ONCE',", "[validation_codes['command'], datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"), session['username']]) g.db.commit() flash('Command submitted') return render_template('submit_page.html', commands=command_history(),", "the last temperature and humidity record data. The returned object", "db: with app.open_resource('schema.sql', mode='r') as f: db.cursor().executescript(f.read()) db.commit() # Auto-open", "'off' elif codes['mode'] == 'heat': command_postfix = 'heat' + codes['temperature']", "is not None: db.close() @app.route('/', methods=['GET', 'POST']) def welcome_page(): if", "= True else: codes['mode_error'] = False if user_mode is not", "= db_record[1] out_record['fahrenheit'] = int(round(out_record['celsius']*9/5.0 + 32)) out_record['humidity'] = int(round(db_record[2]))", "# command is being issued to AC user_mode = request.form['mode']", "# Auto-open and close DB when serving requests @app.before_request def", "Auto-open and close DB when serving requests @app.before_request def before_request():", "cmd = 'heat to ' + row[0][4:] elif row[0] ==", "(command, ts, user) values (?, ?, ?)', [validation_codes['command'], datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),", "return render_template('welcome_page.html', commands=command_history(), error=error, last_record=last_record()) @app.route('/submit', methods=['GET', 'POST']) def submit_page():", "import closing from tquery import get_latest_record from config import *", "and close DB when serving requests @app.before_request def before_request(): g.db", "def submit_page(): error = None if not session.get('username'): abort(401) if", "to oldest.\"\"\" cur = g.db.execute('select command, ts, user from commands", "submit_page(): error = None if not session.get('username'): abort(401) if request.method", "codes['temperature'] else: command_postfix = codes['temperature'] codes['command'] = command_postfix return codes", "out_record['date'] = db_record[0].strftime(\"%Y-%m-%d\") out_record['time'] = db_record[0].strftime(\"%H:%M\") out_record['celsius'] = db_record[1] out_record['fahrenheit']", "= 'password' else: # successful login session['username'] = request.form['username'] flash('Hi", "in app.config['USERNAMES']: error = 'username' elif request.form['password'] != 
app.config['PASSWORD']: error", "celsius and humidity. \"\"\" db_record = get_latest_record() out_record = dict()", "= db_record[0].strftime(\"%Y-%m-%d\") out_record['time'] = db_record[0].strftime(\"%H:%M\") out_record['celsius'] = db_record[1] out_record['fahrenheit'] =", "32)) out_record['humidity'] = int(round(db_record[2])) return out_record if __name__ == '__main__':", "desc') command_history = [] for row in cur.fetchall(): if row[0][0]", "== 'off': command_postfix = 'off' elif codes['mode'] == 'heat': command_postfix", "methods=['GET', 'POST']) def welcome_page(): if 'username' in session and session['username']:", "codes['mode'] == 'off': command_postfix = 'off' elif codes['mode'] == 'heat':", "out_record['celsius'] = db_record[1] out_record['fahrenheit'] = int(round(out_record['celsius']*9/5.0 + 32)) out_record['humidity'] =", "def welcome_page(): if 'username' in session and session['username']: return redirect(url_for('submit_page'))", "to ' + row[0][4:] elif row[0] == 'off': cmd =", "returned object is a dict with keys ts, fahrenheit, celsius", "= dict() if user_mode not in app.config['ACMODES']: codes['mode_error'] = True", "True else: codes['mode_error'] = False if user_mode is not 'off'", "from contextlib import closing from tquery import get_latest_record from config", "and not codes['temperature_error']: codes['mode'] = user_mode codes['temperature'] = user_temperature if", "logged out') return redirect(url_for('welcome_page')) def validate_AC_command(user_mode, user_temperature): \"\"\"Validates and sanitizes", "+ row[0] command_history.append(dict(command=cmd, ts=row[1], user=row[2])) return command_history def last_record(): \"\"\"Returns", "datetime from flask import Flask, request, session, g, redirect, url_for,", "' + row[0] command_history.append(dict(command=cmd, ts=row[1], user=row[2])) return command_history def last_record():", "user_mode = request.form['mode'] user_temperature = request.form['temperature'] validation_codes = validate_AC_command(user_mode, user_temperature)", "user) values (?, ?, ?)', [validation_codes['command'], datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"), session['username']]) g.db.commit()", "session['username']]) g.db.commit() flash('Command submitted') return render_template('submit_page.html', commands=command_history(), error=error, last_record=last_record()) @app.route('/logout')", "login session['username'] = request.form['username'] flash('Hi ' + session['username'] + '!')", "function must be imported and executed from the Python interpreter", "The returned object is a dict with keys ts, fahrenheit,", "list of dictionaries, each containing a command issued to the", "g.db.commit() flash('Command submitted') return render_template('submit_page.html', commands=command_history(), error=error, last_record=last_record()) @app.route('/logout') def", "request.method == 'POST': # someone's logging in if not request.form['username']", "else: subprocess.call(['/usr/bin/irsend','SEND_ONCE', 'lgac', validation_codes['command']]) g.db.execute('insert into commands (command, ts, user)", "datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"), session['username']]) g.db.commit() flash('Command submitted') return render_template('submit_page.html', commands=command_history(), error=error,", "with app.open_resource('schema.sql', mode='r') as f: db.cursor().executescript(f.read()) db.commit() # Auto-open and", "'password' else: # successful login session['username'] = request.form['username'] flash('Hi '", "or 
validation_codes['temperature_error']): error=validation_codes else: subprocess.call(['/usr/bin/irsend','SEND_ONCE', 'lgac', validation_codes['command']]) g.db.execute('insert into commands", "not None: db.close() @app.route('/', methods=['GET', 'POST']) def welcome_page(): if 'username'", "database. This function must be imported and executed from the", "out_record = dict() out_record['date'] = db_record[0].strftime(\"%Y-%m-%d\") out_record['time'] = db_record[0].strftime(\"%H:%M\") out_record['celsius']", "issued to AC user_mode = request.form['mode'] user_temperature = request.form['temperature'] validation_codes", "if (validation_codes['mode_error'] or validation_codes['temperature_error']): error=validation_codes else: subprocess.call(['/usr/bin/irsend','SEND_ONCE', 'lgac', validation_codes['command']]) g.db.execute('insert", "submitted') return render_template('submit_page.html', commands=command_history(), error=error, last_record=last_record()) @app.route('/logout') def logout(): session.pop('logged_in',", "command, ts, user from commands order by id desc') command_history", "abort(401) if request.method == 'POST': # command is being issued", "= False if user_mode is not 'off' and user_temperature not", "ordered chronologically, from newest to oldest.\"\"\" cur = g.db.execute('select command,", "(validation_codes['mode_error'] or validation_codes['temperature_error']): error=validation_codes else: subprocess.call(['/usr/bin/irsend','SEND_ONCE', 'lgac', validation_codes['command']]) g.db.execute('insert into", "closing from tquery import get_latest_record from config import * app", "and executed from the Python interpreter before the application is", "row[0] command_history.append(dict(command=cmd, ts=row[1], user=row[2])) return command_history def last_record(): \"\"\"Returns the", "session and session['username']: return redirect(url_for('submit_page')) error = None if request.method", "command is being issued to AC user_mode = request.form['mode'] user_temperature", "oldest.\"\"\" cur = g.db.execute('select command, ts, user from commands order", "flash from contextlib import closing from tquery import get_latest_record from", "codes['temperature_error'] = False if not codes['mode_error'] and not codes['temperature_error']: codes['mode']", "\\ abort, render_template, flash from contextlib import closing from tquery", "functions def connect_db(): return sqlite3.connect(app.config['DATABASE']) def init_db(): \"\"\"Initializes the sqlite3", "'POST': # command is being issued to AC user_mode =", "'heat': command_postfix = 'heat' + codes['temperature'] else: command_postfix = codes['temperature']", "True else: codes['temperature_error'] = False if not codes['mode_error'] and not", "return render_template('submit_page.html', commands=command_history(), error=error, last_record=last_record()) @app.route('/logout') def logout(): session.pop('logged_in', None)", "AC previously. The list is ordered chronologically, from newest to", "connect_db(): return sqlite3.connect(app.config['DATABASE']) def init_db(): \"\"\"Initializes the sqlite3 database. This", "g.db.execute('insert into commands (command, ts, user) values (?, ?, ?)',", "\"\"\"Initializes the sqlite3 database. 
This function must be imported and", "'POST']) def submit_page(): error = None if not session.get('username'): abort(401)", "in cur.fetchall(): if row[0][0] == 'h': cmd = 'heat to", "user_temperature) if (validation_codes['mode_error'] or validation_codes['temperature_error']): error=validation_codes else: subprocess.call(['/usr/bin/irsend','SEND_ONCE', 'lgac', validation_codes['command']])", "db.commit() # Auto-open and close DB when serving requests @app.before_request", "someone's logging in if not request.form['username'] in app.config['USERNAMES']: error =", "from the Python interpreter before the application is first run.\"\"\"", "fahrenheit, celsius and humidity. \"\"\" db_record = get_latest_record() out_record =", "url_for, \\ abort, render_template, flash from contextlib import closing from", "@app.route('/logout') def logout(): session.pop('logged_in', None) flash('You were logged out') return", "The list is ordered chronologically, from newest to oldest.\"\"\" cur", "codes['mode_error'] and not codes['temperature_error']: codes['mode'] = user_mode codes['temperature'] = user_temperature", "= request.form['mode'] user_temperature = request.form['temperature'] validation_codes = validate_AC_command(user_mode, user_temperature) if", "== 'h': cmd = 'heat to ' + row[0][4:] elif", "if not session.get('username'): abort(401) if request.method == 'POST': # command", "user=row[2])) return command_history def last_record(): \"\"\"Returns the last temperature and", "'off' else: cmd = 'cool to ' + row[0] command_history.append(dict(command=cmd,", "newest to oldest.\"\"\" cur = g.db.execute('select command, ts, user from", "out_record['humidity'] = int(round(db_record[2])) return out_record if __name__ == '__main__': app.run(host='0.0.0.0')", "elif request.form['password'] != app.config['PASSWORD']: error = 'password' else: # successful", "if row[0][0] == 'h': cmd = 'heat to ' +", "command_postfix return codes def command_history(): \"\"\"Returns a list of dictionaries,", "render_template('submit_page.html', commands=command_history(), error=error, last_record=last_record()) @app.route('/logout') def logout(): session.pop('logged_in', None) flash('You", "session.get('username'): abort(401) if request.method == 'POST': # command is being", "= 'cool to ' + row[0] command_history.append(dict(command=cmd, ts=row[1], user=row[2])) return", "False if user_mode is not 'off' and user_temperature not in", "validate_AC_command(user_mode, user_temperature) if (validation_codes['mode_error'] or validation_codes['temperature_error']): error=validation_codes else: subprocess.call(['/usr/bin/irsend','SEND_ONCE', 'lgac',", "= user_temperature if codes['mode'] == 'off': command_postfix = 'off' elif", "@app.route('/', methods=['GET', 'POST']) def welcome_page(): if 'username' in session and", "None: db.close() @app.route('/', methods=['GET', 'POST']) def welcome_page(): if 'username' in", "keys ts, fahrenheit, celsius and humidity. \"\"\" db_record = get_latest_record()", "request.form['temperature'] validation_codes = validate_AC_command(user_mode, user_temperature) if (validation_codes['mode_error'] or validation_codes['temperature_error']): error=validation_codes", "db is not None: db.close() @app.route('/', methods=['GET', 'POST']) def welcome_page():", "\"\"\"Returns the last temperature and humidity record data. 
The returned", "not request.form['username'] in app.config['USERNAMES']: error = 'username' elif request.form['password'] !=", "= codes['temperature'] codes['command'] = command_postfix return codes def command_history(): \"\"\"Returns", "db_record = get_latest_record() out_record = dict() out_record['date'] = db_record[0].strftime(\"%Y-%m-%d\") out_record['time']", "application is first run.\"\"\" with closing(connect_db()) as db: with app.open_resource('schema.sql',", "codes def command_history(): \"\"\"Returns a list of dictionaries, each containing", "return redirect(url_for('submit_page')) return render_template('welcome_page.html', commands=command_history(), error=error, last_record=last_record()) @app.route('/submit', methods=['GET', 'POST'])", "request.form['username'] in app.config['USERNAMES']: error = 'username' elif request.form['password'] != app.config['PASSWORD']:", "DB when serving requests @app.before_request def before_request(): g.db = connect_db()", "command issued to the AC previously. The list is ordered", "issued to the AC previously. The list is ordered chronologically,", "from flask import Flask, request, session, g, redirect, url_for, \\", "elif row[0] == 'off': cmd = 'off' else: cmd =", "def validate_AC_command(user_mode, user_temperature): \"\"\"Validates and sanitizes user-input command; translates command", "dictionaries, each containing a command issued to the AC previously.", "is a dict with keys ts, fahrenheit, celsius and humidity.", "as db: with app.open_resource('schema.sql', mode='r') as f: db.cursor().executescript(f.read()) db.commit() #", "from config import * app = Flask(__name__) app.config.from_object(__name__) # DB", "import sqlite3 import subprocess, datetime from flask import Flask, request,", "being issued to AC user_mode = request.form['mode'] user_temperature = request.form['temperature']", "request.form['password'] != app.config['PASSWORD']: error = 'password' else: # successful login", "a command issued to the AC previously. 
The list is", "logging in if not request.form['username'] in app.config['USERNAMES']: error = 'username'", "user_temperature not in app.config['ACTEMPERATURES']: codes['temperature_error'] = True else: codes['temperature_error'] =", "user from commands order by id desc') command_history = []", "if user_mode is not 'off' and user_temperature not in app.config['ACTEMPERATURES']:", "irsend call.\"\"\" codes = dict() if user_mode not in app.config['ACMODES']:", "+ session['username'] + '!') return redirect(url_for('submit_page')) return render_template('welcome_page.html', commands=command_history(), error=error,", "else: codes['mode_error'] = False if user_mode is not 'off' and", "requests @app.before_request def before_request(): g.db = connect_db() @app.teardown_request def teardown_request(exception):", "# someone's logging in if not request.form['username'] in app.config['USERNAMES']: error", "out_record['fahrenheit'] = int(round(out_record['celsius']*9/5.0 + 32)) out_record['humidity'] = int(round(db_record[2])) return out_record", "as f: db.cursor().executescript(f.read()) db.commit() # Auto-open and close DB when", "= g.db.execute('select command, ts, user from commands order by id", "DB helper functions def connect_db(): return sqlite3.connect(app.config['DATABASE']) def init_db(): \"\"\"Initializes", "else: # successful login session['username'] = request.form['username'] flash('Hi ' +", "in app.config['ACMODES']: codes['mode_error'] = True else: codes['mode_error'] = False if", "validation_codes['temperature_error']): error=validation_codes else: subprocess.call(['/usr/bin/irsend','SEND_ONCE', 'lgac', validation_codes['command']]) g.db.execute('insert into commands (command,", "if not request.form['username'] in app.config['USERNAMES']: error = 'username' elif request.form['password']", "record data. The returned object is a dict with keys", "= False if not codes['mode_error'] and not codes['temperature_error']: codes['mode'] =", "validate_AC_command(user_mode, user_temperature): \"\"\"Validates and sanitizes user-input command; translates command into", "= request.form['temperature'] validation_codes = validate_AC_command(user_mode, user_temperature) if (validation_codes['mode_error'] or validation_codes['temperature_error']):", "def connect_db(): return sqlite3.connect(app.config['DATABASE']) def init_db(): \"\"\"Initializes the sqlite3 database.", "welcome_page(): if 'username' in session and session['username']: return redirect(url_for('submit_page')) error", "'!') return redirect(url_for('submit_page')) return render_template('welcome_page.html', commands=command_history(), error=error, last_record=last_record()) @app.route('/submit', methods=['GET',", "'POST': # someone's logging in if not request.form['username'] in app.config['USERNAMES']:", "sqlite3.connect(app.config['DATABASE']) def init_db(): \"\"\"Initializes the sqlite3 database. 
This function must", "= getattr(g, 'db', None) if db is not None: db.close()", "validation_codes['command']]) g.db.execute('insert into commands (command, ts, user) values (?, ?,", "def teardown_request(exception): db = getattr(g, 'db', None) if db is", "= 'heat to ' + row[0][4:] elif row[0] == 'off':", "translates command into irsend call.\"\"\" codes = dict() if user_mode", "is ordered chronologically, from newest to oldest.\"\"\" cur = g.db.execute('select", "contextlib import closing from tquery import get_latest_record from config import", "def logout(): session.pop('logged_in', None) flash('You were logged out') return redirect(url_for('welcome_page'))", "data. The returned object is a dict with keys ts,", "request, session, g, redirect, url_for, \\ abort, render_template, flash from", "import subprocess, datetime from flask import Flask, request, session, g,", "sanitizes user-input command; translates command into irsend call.\"\"\" codes =", "return redirect(url_for('welcome_page')) def validate_AC_command(user_mode, user_temperature): \"\"\"Validates and sanitizes user-input command;", "'db', None) if db is not None: db.close() @app.route('/', methods=['GET',", "if db is not None: db.close() @app.route('/', methods=['GET', 'POST']) def", "'off' and user_temperature not in app.config['ACTEMPERATURES']: codes['temperature_error'] = True else:", "+ '!') return redirect(url_for('submit_page')) return render_template('welcome_page.html', commands=command_history(), error=error, last_record=last_record()) @app.route('/submit',", "codes = dict() if user_mode not in app.config['ACMODES']: codes['mode_error'] =", "and humidity record data. The returned object is a dict", "commands (command, ts, user) values (?, ?, ?)', [validation_codes['command'], datetime.datetime.now().strftime(\"%Y-%m-%d", "commands=command_history(), error=error, last_record=last_record()) @app.route('/logout') def logout(): session.pop('logged_in', None) flash('You were", "chronologically, from newest to oldest.\"\"\" cur = g.db.execute('select command, ts,", "cmd = 'off' else: cmd = 'cool to ' +", "is first run.\"\"\" with closing(connect_db()) as db: with app.open_resource('schema.sql', mode='r')", "not in app.config['ACMODES']: codes['mode_error'] = True else: codes['mode_error'] = False", "g.db.execute('select command, ts, user from commands order by id desc')", "row[0][4:] elif row[0] == 'off': cmd = 'off' else: cmd", "user_mode codes['temperature'] = user_temperature if codes['mode'] == 'off': command_postfix =", "if request.method == 'POST': # someone's logging in if not", "else: codes['temperature_error'] = False if not codes['mode_error'] and not codes['temperature_error']:", "user-input command; translates command into irsend call.\"\"\" codes = dict()", "session, g, redirect, url_for, \\ abort, render_template, flash from contextlib", "Flask(__name__) app.config.from_object(__name__) # DB helper functions def connect_db(): return sqlite3.connect(app.config['DATABASE'])", "command_history.append(dict(command=cmd, ts=row[1], user=row[2])) return command_history def last_record(): \"\"\"Returns the last", "app.config['PASSWORD']: error = 'password' else: # successful login session['username'] =", "!= app.config['PASSWORD']: error = 'password' else: # successful login session['username']", "codes['command'] = command_postfix return codes def command_history(): \"\"\"Returns a list", "out_record['time'] = db_record[0].strftime(\"%H:%M\") out_record['celsius'] = db_record[1] out_record['fahrenheit'] = 
int(round(out_record['celsius']*9/5.0 +", "codes['mode_error'] = True else: codes['mode_error'] = False if user_mode is", "with keys ts, fahrenheit, celsius and humidity. \"\"\" db_record =", "else: command_postfix = codes['temperature'] codes['command'] = command_postfix return codes def", "abort, render_template, flash from contextlib import closing from tquery import", "return sqlite3.connect(app.config['DATABASE']) def init_db(): \"\"\"Initializes the sqlite3 database. This function", "error=error, last_record=last_record()) @app.route('/logout') def logout(): session.pop('logged_in', None) flash('You were logged", "error = None if not session.get('username'): abort(401) if request.method ==", "'username' elif request.form['password'] != app.config['PASSWORD']: error = 'password' else: #", "command_postfix = 'heat' + codes['temperature'] else: command_postfix = codes['temperature'] codes['command']", "f: db.cursor().executescript(f.read()) db.commit() # Auto-open and close DB when serving", "* app = Flask(__name__) app.config.from_object(__name__) # DB helper functions def", "codes['temperature_error'] = True else: codes['temperature_error'] = False if not codes['mode_error']", "subprocess, datetime from flask import Flask, request, session, g, redirect,", "the application is first run.\"\"\" with closing(connect_db()) as db: with", "= get_latest_record() out_record = dict() out_record['date'] = db_record[0].strftime(\"%Y-%m-%d\") out_record['time'] =", "if codes['mode'] == 'off': command_postfix = 'off' elif codes['mode'] ==", "successful login session['username'] = request.form['username'] flash('Hi ' + session['username'] +", "@app.before_request def before_request(): g.db = connect_db() @app.teardown_request def teardown_request(exception): db", "row[0][0] == 'h': cmd = 'heat to ' + row[0][4:]", "db_record[1] out_record['fahrenheit'] = int(round(out_record['celsius']*9/5.0 + 32)) out_record['humidity'] = int(round(db_record[2])) return", "flask import Flask, request, session, g, redirect, url_for, \\ abort,", "and humidity. \"\"\" db_record = get_latest_record() out_record = dict() out_record['date']", "error=validation_codes else: subprocess.call(['/usr/bin/irsend','SEND_ONCE', 'lgac', validation_codes['command']]) g.db.execute('insert into commands (command, ts,", "user_mode is not 'off' and user_temperature not in app.config['ACTEMPERATURES']: codes['temperature_error']", "commands order by id desc') command_history = [] for row", "codes['mode_error'] = False if user_mode is not 'off' and user_temperature", "= 'off' else: cmd = 'cool to ' + row[0]", "= user_mode codes['temperature'] = user_temperature if codes['mode'] == 'off': command_postfix", "redirect, url_for, \\ abort, render_template, flash from contextlib import closing", "must be imported and executed from the Python interpreter before", "last_record=last_record()) @app.route('/logout') def logout(): session.pop('logged_in', None) flash('You were logged out')", "when serving requests @app.before_request def before_request(): g.db = connect_db() @app.teardown_request", "to AC user_mode = request.form['mode'] user_temperature = request.form['temperature'] validation_codes =", "= int(round(out_record['celsius']*9/5.0 + 32)) out_record['humidity'] = int(round(db_record[2])) return out_record if", "def init_db(): \"\"\"Initializes the sqlite3 database. 
This function must be", "'POST']) def welcome_page(): if 'username' in session and session['username']: return", "imported and executed from the Python interpreter before the application", "cur.fetchall(): if row[0][0] == 'h': cmd = 'heat to '", "not codes['mode_error'] and not codes['temperature_error']: codes['mode'] = user_mode codes['temperature'] =", "getattr(g, 'db', None) if db is not None: db.close() @app.route('/',", "dict() if user_mode not in app.config['ACMODES']: codes['mode_error'] = True else:", "for row in cur.fetchall(): if row[0][0] == 'h': cmd =", "[] for row in cur.fetchall(): if row[0][0] == 'h': cmd", "ts, fahrenheit, celsius and humidity. \"\"\" db_record = get_latest_record() out_record", "app.config['ACMODES']: codes['mode_error'] = True else: codes['mode_error'] = False if user_mode", "error = None if request.method == 'POST': # someone's logging", "and user_temperature not in app.config['ACTEMPERATURES']: codes['temperature_error'] = True else: codes['temperature_error']", "session['username']: return redirect(url_for('submit_page')) error = None if request.method == 'POST':", "'h': cmd = 'heat to ' + row[0][4:] elif row[0]", "elif codes['mode'] == 'heat': command_postfix = 'heat' + codes['temperature'] else:", "?)', [validation_codes['command'], datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"), session['username']]) g.db.commit() flash('Command submitted') return render_template('submit_page.html',", "= dict() out_record['date'] = db_record[0].strftime(\"%Y-%m-%d\") out_record['time'] = db_record[0].strftime(\"%H:%M\") out_record['celsius'] =", "user_temperature if codes['mode'] == 'off': command_postfix = 'off' elif codes['mode']", "None) flash('You were logged out') return redirect(url_for('welcome_page')) def validate_AC_command(user_mode, user_temperature):", "by id desc') command_history = [] for row in cur.fetchall():", "'cool to ' + row[0] command_history.append(dict(command=cmd, ts=row[1], user=row[2])) return command_history", "sqlite3 database. This function must be imported and executed from", "g.db = connect_db() @app.teardown_request def teardown_request(exception): db = getattr(g, 'db',", "validation_codes = validate_AC_command(user_mode, user_temperature) if (validation_codes['mode_error'] or validation_codes['temperature_error']): error=validation_codes else:", "Flask, request, session, g, redirect, url_for, \\ abort, render_template, flash", "closing(connect_db()) as db: with app.open_resource('schema.sql', mode='r') as f: db.cursor().executescript(f.read()) db.commit()", "app.open_resource('schema.sql', mode='r') as f: db.cursor().executescript(f.read()) db.commit() # Auto-open and close", "last_record=last_record()) @app.route('/submit', methods=['GET', 'POST']) def submit_page(): error = None if", "config import * app = Flask(__name__) app.config.from_object(__name__) # DB helper", "into irsend call.\"\"\" codes = dict() if user_mode not in", "from tquery import get_latest_record from config import * app =", "serving requests @app.before_request def before_request(): g.db = connect_db() @app.teardown_request def", "if not codes['mode_error'] and not codes['temperature_error']: codes['mode'] = user_mode codes['temperature']", "containing a command issued to the AC previously. The list", "codes['temperature'] codes['command'] = command_postfix return codes def command_history(): \"\"\"Returns a", "humidity. 
\"\"\" db_record = get_latest_record() out_record = dict() out_record['date'] =", "object is a dict with keys ts, fahrenheit, celsius and", "'off': cmd = 'off' else: cmd = 'cool to '", "int(round(out_record['celsius']*9/5.0 + 32)) out_record['humidity'] = int(round(db_record[2])) return out_record if __name__", "row in cur.fetchall(): if row[0][0] == 'h': cmd = 'heat", "commands=command_history(), error=error, last_record=last_record()) @app.route('/submit', methods=['GET', 'POST']) def submit_page(): error =", "session.pop('logged_in', None) flash('You were logged out') return redirect(url_for('welcome_page')) def validate_AC_command(user_mode,", "command; translates command into irsend call.\"\"\" codes = dict() if", "before_request(): g.db = connect_db() @app.teardown_request def teardown_request(exception): db = getattr(g,", "command_history(): \"\"\"Returns a list of dictionaries, each containing a command", "temperature and humidity record data. The returned object is a", "in if not request.form['username'] in app.config['USERNAMES']: error = 'username' elif", "codes['temperature'] = user_temperature if codes['mode'] == 'off': command_postfix = 'off'", "+ codes['temperature'] else: command_postfix = codes['temperature'] codes['command'] = command_postfix return", "codes['mode'] == 'heat': command_postfix = 'heat' + codes['temperature'] else: command_postfix", "of dictionaries, each containing a command issued to the AC", "db_record[0].strftime(\"%Y-%m-%d\") out_record['time'] = db_record[0].strftime(\"%H:%M\") out_record['celsius'] = db_record[1] out_record['fahrenheit'] = int(round(out_record['celsius']*9/5.0", "else: cmd = 'cool to ' + row[0] command_history.append(dict(command=cmd, ts=row[1],", "in session and session['username']: return redirect(url_for('submit_page')) error = None if", "order by id desc') command_history = [] for row in", "None if not session.get('username'): abort(401) if request.method == 'POST': #", "id desc') command_history = [] for row in cur.fetchall(): if", "user_temperature): \"\"\"Validates and sanitizes user-input command; translates command into irsend", "None) if db is not None: db.close() @app.route('/', methods=['GET', 'POST'])", "not codes['temperature_error']: codes['mode'] = user_mode codes['temperature'] = user_temperature if codes['mode']", "return redirect(url_for('submit_page')) error = None if request.method == 'POST': #", "%H:%M:%S\"), session['username']]) g.db.commit() flash('Command submitted') return render_template('submit_page.html', commands=command_history(), error=error, last_record=last_record())", "humidity record data. The returned object is a dict with", "executed from the Python interpreter before the application is first", "if 'username' in session and session['username']: return redirect(url_for('submit_page')) error =", "@app.route('/submit', methods=['GET', 'POST']) def submit_page(): error = None if not", "last temperature and humidity record data. The returned object is", "= True else: codes['temperature_error'] = False if not codes['mode_error'] and", "db.close() @app.route('/', methods=['GET', 'POST']) def welcome_page(): if 'username' in session", "'heat' + codes['temperature'] else: command_postfix = codes['temperature'] codes['command'] = command_postfix", "previously. 
The list is ordered chronologically, from newest to oldest.\"\"\"", "This function must be imported and executed from the Python", "row[0] == 'off': cmd = 'off' else: cmd = 'cool", "= request.form['username'] flash('Hi ' + session['username'] + '!') return redirect(url_for('submit_page'))", "= None if request.method == 'POST': # someone's logging in", "?, ?)', [validation_codes['command'], datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"), session['username']]) g.db.commit() flash('Command submitted') return", "not 'off' and user_temperature not in app.config['ACTEMPERATURES']: codes['temperature_error'] = True", "codes['mode'] = user_mode codes['temperature'] = user_temperature if codes['mode'] == 'off':", "import * app = Flask(__name__) app.config.from_object(__name__) # DB helper functions", "'username' in session and session['username']: return redirect(url_for('submit_page')) error = None", "request.form['username'] flash('Hi ' + session['username'] + '!') return redirect(url_for('submit_page')) return", "dict() out_record['date'] = db_record[0].strftime(\"%Y-%m-%d\") out_record['time'] = db_record[0].strftime(\"%H:%M\") out_record['celsius'] = db_record[1]", "codes['temperature_error']: codes['mode'] = user_mode codes['temperature'] = user_temperature if codes['mode'] ==", "logout(): session.pop('logged_in', None) flash('You were logged out') return redirect(url_for('welcome_page')) def" ]
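Because app.config.from_object(__name__) lifts every uppercase name that "from config import *" brought into the module, the app implicitly expects a config.py defining the keys it reads. A minimal sketch follows; only the key names are implied by the code above, every value is an illustrative assumption, and SECRET_KEY is Flask's own session requirement rather than something visible in this snippet.

# config.py -- hypothetical values; only the key names come from the app above
DATABASE = '/tmp/ac_control.db'       # sqlite3 file opened by connect_db()
SECRET_KEY = 'change-me'              # needed by Flask for signed session cookies
USERNAMES = ['alice', 'bob']          # accounts accepted at login
PASSWORD = 'hunter2'                  # shared password checked at login
ACMODES = ['off', 'heat', 'cool']     # modes accepted by validate_AC_command
ACTEMPERATURES = [str(t) for t in range(18, 31)]  # allowed temperature strings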
[ "eigenvectors: np.ndarray) -> bool: \"\"\"Check that the eigenvalue equation holds.\"\"\"", "matrix: np.ndarray, eigenvalues: np.ndarray, eigenvectors: np.ndarray) -> bool: \"\"\"Check that", "return np.sqrt(np.dot(vs, vs)) def create_random_matrix(size: int) -> np.array: \"\"\"Create a", "= create_random_matrix(size) return xs + xs.T def check_eigenpairs( matrix: np.ndarray,", "** 2).reshape(size, size) def create_symmetic_matrix(size: int) -> np.array: \"\"\"Create a", "np.ndarray, eigenvalues: np.ndarray, eigenvectors: np.ndarray) -> bool: \"\"\"Check that the", "np.array: \"\"\"Create a numpy symmetric matrix.\"\"\" xs = create_random_matrix(size) return", "def norm(vs: np.array) -> float: \"\"\"Compute the norm of a", "numpy symmetric matrix.\"\"\" xs = create_random_matrix(size) return xs + xs.T", "return xs + xs.T def check_eigenpairs( matrix: np.ndarray, eigenvalues: np.ndarray,", "that the eigenvalue equation holds.\"\"\" for i, value in enumerate(eigenvalues):", "enumerate(eigenvalues): residue = np.dot( matrix, eigenvectors[:, i]) - value *", "create_random_matrix(size) return xs + xs.T def check_eigenpairs( matrix: np.ndarray, eigenvalues:", "import numpy as np def norm(vs: np.array) -> float: \"\"\"Compute", "numpy as np def norm(vs: np.array) -> float: \"\"\"Compute the", "np.dot( matrix, eigenvectors[:, i]) - value * eigenvectors[:, i] assert", "float: \"\"\"Compute the norm of a vector.\"\"\" return np.sqrt(np.dot(vs, vs))", "random matrix.\"\"\" return np.random.normal(size=size ** 2).reshape(size, size) def create_symmetic_matrix(size: int)", "return np.random.normal(size=size ** 2).reshape(size, size) def create_symmetic_matrix(size: int) -> np.array:", "as np def norm(vs: np.array) -> float: \"\"\"Compute the norm", "xs + xs.T def check_eigenpairs( matrix: np.ndarray, eigenvalues: np.ndarray, eigenvectors:", "\"\"\"Check that the eigenvalue equation holds.\"\"\" for i, value in", "np def norm(vs: np.array) -> float: \"\"\"Compute the norm of", "the eigenvalue equation holds.\"\"\" for i, value in enumerate(eigenvalues): residue", "create_symmetic_matrix(size: int) -> np.array: \"\"\"Create a numpy symmetric matrix.\"\"\" xs", "tests.\"\"\" import numpy as np def norm(vs: np.array) -> float:", "+ xs.T def check_eigenpairs( matrix: np.ndarray, eigenvalues: np.ndarray, eigenvectors: np.ndarray)", "symmetric matrix.\"\"\" xs = create_random_matrix(size) return xs + xs.T def", "xs = create_random_matrix(size) return xs + xs.T def check_eigenpairs( matrix:", "of a vector.\"\"\" return np.sqrt(np.dot(vs, vs)) def create_random_matrix(size: int) ->", "value in enumerate(eigenvalues): residue = np.dot( matrix, eigenvectors[:, i]) -", "np.ndarray) -> bool: \"\"\"Check that the eigenvalue equation holds.\"\"\" for", "\"\"\"Helper functions to tests.\"\"\" import numpy as np def norm(vs:", "\"\"\"Create a numpy symmetric matrix.\"\"\" xs = create_random_matrix(size) return xs", "a vector.\"\"\" return np.sqrt(np.dot(vs, vs)) def create_random_matrix(size: int) -> np.array:", "matrix, eigenvectors[:, i]) - value * eigenvectors[:, i] assert norm(residue)", "\"\"\"Create a numpy random matrix.\"\"\" return np.random.normal(size=size ** 2).reshape(size, size)", "bool: \"\"\"Check that the eigenvalue equation holds.\"\"\" for i, value", "matrix.\"\"\" return np.random.normal(size=size ** 2).reshape(size, size) def create_symmetic_matrix(size: int) ->", "= np.dot( matrix, eigenvectors[:, i]) - value * eigenvectors[:, i]", "check_eigenpairs( matrix: np.ndarray, eigenvalues: np.ndarray, 
eigenvectors: np.ndarray) -> bool: \"\"\"Check", "create_random_matrix(size: int) -> np.array: \"\"\"Create a numpy random matrix.\"\"\" return", "holds.\"\"\" for i, value in enumerate(eigenvalues): residue = np.dot( matrix,", "norm(vs: np.array) -> float: \"\"\"Compute the norm of a vector.\"\"\"", "-> float: \"\"\"Compute the norm of a vector.\"\"\" return np.sqrt(np.dot(vs,", "np.ndarray, eigenvectors: np.ndarray) -> bool: \"\"\"Check that the eigenvalue equation", "eigenvectors[:, i]) - value * eigenvectors[:, i] assert norm(residue) <", "np.sqrt(np.dot(vs, vs)) def create_random_matrix(size: int) -> np.array: \"\"\"Create a numpy", "i]) - value * eigenvectors[:, i] assert norm(residue) < 1e-8", "numpy random matrix.\"\"\" return np.random.normal(size=size ** 2).reshape(size, size) def create_symmetic_matrix(size:", "2).reshape(size, size) def create_symmetic_matrix(size: int) -> np.array: \"\"\"Create a numpy", "to tests.\"\"\" import numpy as np def norm(vs: np.array) ->", "size) def create_symmetic_matrix(size: int) -> np.array: \"\"\"Create a numpy symmetric", "-> np.array: \"\"\"Create a numpy random matrix.\"\"\" return np.random.normal(size=size **", "eigenvalue equation holds.\"\"\" for i, value in enumerate(eigenvalues): residue =", "\"\"\"Compute the norm of a vector.\"\"\" return np.sqrt(np.dot(vs, vs)) def", "vs)) def create_random_matrix(size: int) -> np.array: \"\"\"Create a numpy random", "np.array: \"\"\"Create a numpy random matrix.\"\"\" return np.random.normal(size=size ** 2).reshape(size,", "vector.\"\"\" return np.sqrt(np.dot(vs, vs)) def create_random_matrix(size: int) -> np.array: \"\"\"Create", "int) -> np.array: \"\"\"Create a numpy random matrix.\"\"\" return np.random.normal(size=size", "residue = np.dot( matrix, eigenvectors[:, i]) - value * eigenvectors[:,", "def create_symmetic_matrix(size: int) -> np.array: \"\"\"Create a numpy symmetric matrix.\"\"\"", "in enumerate(eigenvalues): residue = np.dot( matrix, eigenvectors[:, i]) - value", "np.random.normal(size=size ** 2).reshape(size, size) def create_symmetic_matrix(size: int) -> np.array: \"\"\"Create", "i, value in enumerate(eigenvalues): residue = np.dot( matrix, eigenvectors[:, i])", "norm of a vector.\"\"\" return np.sqrt(np.dot(vs, vs)) def create_random_matrix(size: int)", "a numpy symmetric matrix.\"\"\" xs = create_random_matrix(size) return xs +", "matrix.\"\"\" xs = create_random_matrix(size) return xs + xs.T def check_eigenpairs(", "eigenvalues: np.ndarray, eigenvectors: np.ndarray) -> bool: \"\"\"Check that the eigenvalue", "for i, value in enumerate(eigenvalues): residue = np.dot( matrix, eigenvectors[:,", "np.array) -> float: \"\"\"Compute the norm of a vector.\"\"\" return", "-> bool: \"\"\"Check that the eigenvalue equation holds.\"\"\" for i,", "equation holds.\"\"\" for i, value in enumerate(eigenvalues): residue = np.dot(", "a numpy random matrix.\"\"\" return np.random.normal(size=size ** 2).reshape(size, size) def", "def check_eigenpairs( matrix: np.ndarray, eigenvalues: np.ndarray, eigenvectors: np.ndarray) -> bool:", "functions to tests.\"\"\" import numpy as np def norm(vs: np.array)", "-> np.array: \"\"\"Create a numpy symmetric matrix.\"\"\" xs = create_random_matrix(size)", "the norm of a vector.\"\"\" return np.sqrt(np.dot(vs, vs)) def create_random_matrix(size:", "xs.T def check_eigenpairs( matrix: np.ndarray, eigenvalues: np.ndarray, eigenvectors: np.ndarray) ->", "int) -> np.array: \"\"\"Create a numpy symmetric matrix.\"\"\" xs =", "def create_random_matrix(size: int) -> 
np.array: \"\"\"Create a numpy random matrix.\"\"\"" ]
[ "nums[nums[i]], nums[i] = nums[i], nums[nums[i]] return -1 if __name__ ==", "Solution: def findRepeatNumber(self, nums: List[int]) -> int: # solution one:", "i in range(1, len(nums)): if pre == nums[i]: return nums[i]", "def findRepeatNumber(self, nums: List[int]) -> int: # solution one: 哈希表", "len(nums)): if pre == nums[i]: return nums[i] else: pre =", "nums = [2, 3, 1, 0, 2, 5, 3] print(Solution().findRepeatNumber(nums))", "<filename>solutions/Interview-03-shu-zu-zhong-zhong-fu-de-shu-zi-lcof/03.py<gh_stars>10-100 from typing import List class Solution: def findRepeatNumber(self, nums:", "True else: return nums[i] return -1 # solution two: 排序", "[False for i in range(n)] for i in range(n): if", "range(n)] for i in range(n): if flag[nums[i]] == False: flag[nums[i]]", "# solution two: 排序 nums.sort() pre = nums[0] for i", "交换 else: nums[nums[i]], nums[i] = nums[i], nums[nums[i]] return -1 if", "typing import List class Solution: def findRepeatNumber(self, nums: List[int]) ->", "else: return nums[i] return -1 # solution two: 排序 nums.sort()", "nums[i] else: pre = nums[i] return -1 # solution three:", "= len(nums) for i in range(n): if nums[i] == i:", "class Solution: def findRepeatNumber(self, nums: List[int]) -> int: # solution", "List class Solution: def findRepeatNumber(self, nums: List[int]) -> int: #", "= nums[i] return -1 # solution three: 两个萝卜一个坑 n =", "List[int]) -> int: # solution one: 哈希表 n = len(nums)", "if pre == nums[i]: return nums[i] else: pre = nums[i]", "nums[i] == i: continue # 有重复 elif nums[nums[i]] == nums[i]:", "== False: flag[nums[i]] = True else: return nums[i] return -1", "range(n): if flag[nums[i]] == False: flag[nums[i]] = True else: return", "for i in range(n)] for i in range(n): if flag[nums[i]]", "else: pre = nums[i] return -1 # solution three: 两个萝卜一个坑", "findRepeatNumber(self, nums: List[int]) -> int: # solution one: 哈希表 n", "i in range(n): if nums[i] == i: continue # 有重复", "range(n): if nums[i] == i: continue # 有重复 elif nums[nums[i]]", "return nums[i] return -1 # solution two: 排序 nums.sort() pre", "for i in range(1, len(nums)): if pre == nums[i]: return", "if __name__ == \"__main__\": nums = [2, 3, 1, 0,", "n = len(nums) for i in range(n): if nums[i] ==", "range(1, len(nums)): if pre == nums[i]: return nums[i] else: pre", "if flag[nums[i]] == False: flag[nums[i]] = True else: return nums[i]", "return nums[i] # 交换 else: nums[nums[i]], nums[i] = nums[i], nums[nums[i]]", "= nums[0] for i in range(1, len(nums)): if pre ==", "solution two: 排序 nums.sort() pre = nums[0] for i in", "nums[nums[i]] == nums[i]: return nums[i] # 交换 else: nums[nums[i]], nums[i]", "nums[i] return -1 # solution two: 排序 nums.sort() pre =", "flag[nums[i]] == False: flag[nums[i]] = True else: return nums[i] return", "i in range(n): if flag[nums[i]] == False: flag[nums[i]] = True", "nums[i] # 交换 else: nums[nums[i]], nums[i] = nums[i], nums[nums[i]] return", "in range(n)] for i in range(n): if flag[nums[i]] == False:", "elif nums[nums[i]] == nums[i]: return nums[i] # 交换 else: nums[nums[i]],", "-> int: # solution one: 哈希表 n = len(nums) flag", "return -1 # solution three: 两个萝卜一个坑 n = len(nums) for", "else: nums[nums[i]], nums[i] = nums[i], nums[nums[i]] return -1 if __name__", "nums[nums[i]] return -1 if __name__ == \"__main__\": nums = [2,", "== nums[i]: return nums[i] else: pre = nums[i] return -1", "for i in range(n): if flag[nums[i]] == False: flag[nums[i]] =", "nums.sort() pre = nums[0] for i in range(1, len(nums)): if", "nums[i] return -1 # solution three: 两个萝卜一个坑 n = len(nums)", "nums[i] = 
nums[i], nums[nums[i]] return -1 if __name__ == \"__main__\":", "int: # solution one: 哈希表 n = len(nums) flag =", "solution three: 两个萝卜一个坑 n = len(nums) for i in range(n):", "nums: List[int]) -> int: # solution one: 哈希表 n =", "i in range(n)] for i in range(n): if flag[nums[i]] ==", "哈希表 n = len(nums) flag = [False for i in", "两个萝卜一个坑 n = len(nums) for i in range(n): if nums[i]", "pre == nums[i]: return nums[i] else: pre = nums[i] return", "one: 哈希表 n = len(nums) flag = [False for i", "= [False for i in range(n)] for i in range(n):", "nums[i]: return nums[i] else: pre = nums[i] return -1 #", "= nums[i], nums[nums[i]] return -1 if __name__ == \"__main__\": nums", "in range(1, len(nums)): if pre == nums[i]: return nums[i] else:", "nums[i], nums[nums[i]] return -1 if __name__ == \"__main__\": nums =", "in range(n): if flag[nums[i]] == False: flag[nums[i]] = True else:", "# solution one: 哈希表 n = len(nums) flag = [False", "solution one: 哈希表 n = len(nums) flag = [False for", "return nums[i] else: pre = nums[i] return -1 # solution", "-1 if __name__ == \"__main__\": nums = [2, 3, 1,", "# 交换 else: nums[nums[i]], nums[i] = nums[i], nums[nums[i]] return -1", "two: 排序 nums.sort() pre = nums[0] for i in range(1,", "== i: continue # 有重复 elif nums[nums[i]] == nums[i]: return", "return -1 if __name__ == \"__main__\": nums = [2, 3,", "排序 nums.sort() pre = nums[0] for i in range(1, len(nums)):", "False: flag[nums[i]] = True else: return nums[i] return -1 #", "import List class Solution: def findRepeatNumber(self, nums: List[int]) -> int:", "__name__ == \"__main__\": nums = [2, 3, 1, 0, 2,", "有重复 elif nums[nums[i]] == nums[i]: return nums[i] # 交换 else:", "= len(nums) flag = [False for i in range(n)] for", "== \"__main__\": nums = [2, 3, 1, 0, 2, 5,", "len(nums) for i in range(n): if nums[i] == i: continue", "pre = nums[0] for i in range(1, len(nums)): if pre", "# 有重复 elif nums[nums[i]] == nums[i]: return nums[i] # 交换", "return -1 # solution two: 排序 nums.sort() pre = nums[0]", "continue # 有重复 elif nums[nums[i]] == nums[i]: return nums[i] #", "nums[0] for i in range(1, len(nums)): if pre == nums[i]:", "-1 # solution three: 两个萝卜一个坑 n = len(nums) for i", "pre = nums[i] return -1 # solution three: 两个萝卜一个坑 n", "n = len(nums) flag = [False for i in range(n)]", "= True else: return nums[i] return -1 # solution two:", "len(nums) flag = [False for i in range(n)] for i", "if nums[i] == i: continue # 有重复 elif nums[nums[i]] ==", "nums[i]: return nums[i] # 交换 else: nums[nums[i]], nums[i] = nums[i],", "flag = [False for i in range(n)] for i in", "i: continue # 有重复 elif nums[nums[i]] == nums[i]: return nums[i]", "\"__main__\": nums = [2, 3, 1, 0, 2, 5, 3]", "== nums[i]: return nums[i] # 交换 else: nums[nums[i]], nums[i] =", "flag[nums[i]] = True else: return nums[i] return -1 # solution", "three: 两个萝卜一个坑 n = len(nums) for i in range(n): if", "# solution three: 两个萝卜一个坑 n = len(nums) for i in", "from typing import List class Solution: def findRepeatNumber(self, nums: List[int])", "in range(n): if nums[i] == i: continue # 有重复 elif", "for i in range(n): if nums[i] == i: continue #", "-1 # solution two: 排序 nums.sort() pre = nums[0] for" ]
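Since the method returns inside the first loop, only the hash-table pass ever executes, and it reports the first value seen twice: for the sample input that is 2, not 3. A quick check against the class above:

# The hash-table pass flags nums[4] == 2 before it ever reaches the second 3.
assert Solution().findRepeatNumber([2, 3, 1, 0, 2, 5, 3]) == 2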
[ "GWUNetwork from gwu_nn.layers import Dense from gwu_nn.activation_layers import Sigmoid np.random.seed(8)", "logit colors = ['red'] * num_obs + ['blue'] * num_obs", "print(X.shape) y = np.hstack((np.zeros(num_obs), np.ones(num_obs))) print(y.shape) # colors = ['red']", "we don't cheat X_train, X_test, y_train, y_test = train_test_split(X, y,", "np.hstack((np.zeros(num_obs), np.ones(num_obs))) print(y.shape) # colors = ['red'] * num_obs +", "= ['red'] * num_obs + ['blue'] * num_obs plt.figure(figsize=(12, 8))", "draw from two distinct 2D normal distributions x1 = np.random.multivariate_normal([0,", "plt.figure(figsize=(12, 8)) plt.scatter(X[:, 0], X[:, 1], c=colors, alpha=0.5) # Range", "x1 = np.random.multivariate_normal([0, 0], [[1, .75],[.75, 1]], num_obs) x2 =", "= np.random.multivariate_normal([0, 0], [[1, .75],[.75, 1]], num_obs) x2 = np.random.multivariate_normal([3,", "0], X[:, 1], c = colors, alpha = 0.5) #", "from sklearn.model_selection import train_test_split from gwu_nn.gwu_network import GWUNetwork from gwu_nn.layers", ".75],[.75, 1]], num_obs) x2 = np.random.multivariate_normal([3, 8], [[1, .25],[.25, 1]],", "= GWUNetwork() network.add(Dense(2, 1, True, 'sigmoid')) network.add(Sigmoid()) #network.set_loss('mse') network.compile('log_loss', 0.001)", "1]], num_obs) # Stack our inputs into one feature space", "= np.random.multivariate_normal([3, 8], [[1, .25],[.25, 1]], num_obs) # Stack our", "<reponame>Charles-Peeke/gwu_nn<gh_stars>1-10 import numpy as np import matplotlib.pyplot as plt from", "1]], num_obs) x2 = np.random.multivariate_normal([3, 8], [[1, .25],[.25, 1]], num_obs)", "testing sets so we don't cheat X_train, X_test, y_train, y_test", "our model network = GWUNetwork() network.add(Dense(2, 1, True, 'sigmoid')) network.add(Sigmoid())", "features to draw from two distinct 2D normal distributions x1", "from gwu_nn.activation_layers import Sigmoid np.random.seed(8) num_obs = 8000 # Create", "y_train, epochs=100) from scipy.special import logit colors = ['red'] *", "plt.figure(figsize=(12,8)) # plt.scatter(X[:, 0], X[:, 1], c = colors, alpha", "so we don't cheat X_train, X_test, y_train, y_test = train_test_split(X,", "# Stack our inputs into one feature space X =", "matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from gwu_nn.gwu_network import", "# Create our model network = GWUNetwork() network.add(Dense(2, 1, True,", "values start_x1 = -5 end_x1 = 7 weights = network.layers[0].weights.reshape(-1).tolist()", "epochs=100) from scipy.special import logit colors = ['red'] * num_obs", "Sigmoid np.random.seed(8) num_obs = 8000 # Create our features to", "y = np.hstack((np.zeros(num_obs), np.ones(num_obs))) print(y.shape) # colors = ['red'] *", "* weights[0] - logit(0.5)) / - weights[1] end_y = (bias", "y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42) # Create our", "0.001) network.fit(X_train, y_train, epochs=100) from scipy.special import logit colors =", "network.add(Sigmoid()) #network.set_loss('mse') network.compile('log_loss', 0.001) network.fit(X_train, y_train, epochs=100) from scipy.special import", "= colors, alpha = 0.5) # Lets randomly split things", "import GWUNetwork from gwu_nn.layers import Dense from gwu_nn.activation_layers import Sigmoid", "0.5) # Lets randomly split things into training and testing", "distinct 2D normal distributions x1 = np.random.multivariate_normal([0, 0], [[1, .75],[.75,", "alpha=0.5) # Range of our X values start_x1 = -5", "#network.set_loss('mse') network.compile('log_loss', 
# Logistic-regression example using the gwu_nn package
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from gwu_nn.gwu_network import GWUNetwork
from gwu_nn.layers import Dense
from gwu_nn.activation_layers import Sigmoid

np.random.seed(8)
num_obs = 8000

# Create our features to draw from two distinct 2D normal distributions
x1 = np.random.multivariate_normal([0, 0], [[1, .75], [.75, 1]], num_obs)
x2 = np.random.multivariate_normal([3, 8], [[1, .25], [.25, 1]], num_obs)

# Stack our inputs into one feature space
X = np.vstack((x1, x2))
print(X.shape)
y = np.hstack((np.zeros(num_obs), np.ones(num_obs)))
print(y.shape)

# colors = ['red'] * num_obs + ['blue'] * num_obs
# plt.figure(figsize=(12,8))
# plt.scatter(X[:, 0], X[:, 1], c = colors, alpha = 0.5)

# Lets randomly split things into training and testing sets so we don't cheat
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)

# Create our model
network = GWUNetwork()
network.add(Dense(2, 1, True, 'sigmoid'))
network.add(Sigmoid())
# network.set_loss('mse')
network.compile('log_loss', 0.001)
network.fit(X_train, y_train, epochs=100)

from scipy.special import logit

colors = ['red'] * num_obs + ['blue'] * num_obs
plt.figure(figsize=(12, 8))
plt.scatter(X[:, 0], X[:, 1], c=colors, alpha=0.5)

# Range of our X values
start_x1 = -5
end_x1 = 7

weights = network.layers[0].weights.reshape(-1).tolist()
bias = network.layers[0].bias[0][0]

start_y = (bias + start_x1 * weights[0] - logit(0.5)) / -weights[1]
end_y = (bias + end_x1 * weights[0] - logit(0.5)) / -weights[1]
plt.plot([start_x1, end_x1], [start_y, end_y], color='grey')
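The two endpoints plotted at the end of the script come from solving the single-unit model for its 0.5-probability contour: sigmoid(w0*x1 + w1*x2 + b) = 0.5 is equivalent to w0*x1 + w1*x2 + b = logit(0.5) = 0, so x2 = (b + w0*x1 - logit(0.5)) / (-w1). Below is a minimal standalone sketch of that calculation using only SciPy; the helper name boundary_x2 is hypothetical and not part of gwu_nn.

# Minimal sketch (not part of the original script): where the plotted
# decision boundary comes from. For a single sigmoid unit the model
# predicts p = sigmoid(w0*x1 + w1*x2 + b); the boundary is p = 0.5,
# i.e. w0*x1 + w1*x2 + b = logit(0.5) = 0. Solving for x2:
from scipy.special import logit


def boundary_x2(x1, w0, w1, b, p=0.5):
    """Return the x2 coordinate of the p-level decision boundary at x1.

    Hypothetical helper used only for illustration.
    """
    return (b + w0 * x1 - logit(p)) / -w1


# Example: reproduce the two endpoints the script plots, assuming the
# fitted parameters have already been pulled out as `weights` and `bias`:
# start_y = boundary_x2(-5, weights[0], weights[1], bias)
# end_y = boundary_x2(7, weights[0], weights[1], bias)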
# scattering: van Hove function calculations (XiaoboLinlin/scattering)
import itertools as it

import numpy as np
import mdtraj as md
from progressbar import ProgressBar

from scattering.utils.utils import get_dt
from scattering.utils.constants import get_form_factor


def compute_van_hove(trj, chunk_length, water=False,
                     r_range=(0, 1.0), bin_width=0.005, n_bins=None,
                     self_correlation=True, periodic=True, opt=True, partial=False):
    """Compute the van Hove function of a trajectory.

    Parameters
    ----------
    trj : mdtraj.Trajectory
        trajectory on which to compute the Van Hove function
    chunk_length : int
        length of time between restarting averaging
    water : bool
        use X-ray form factors for water that account for polarization
    r_range : array-like, shape=(2,), optional, default=(0.0, 1.0)
        Minimum and maximum radii.
    bin_width : float, optional, default=0.005
        Width of the bins in nanometers.
    n_bins : int, optional, default=None
        The number of bins. If specified, this will override the
        `bin_width` parameter.
    self_correlation : bool, default=True
        Whether or not to include the self-self correlations

    Returns
    -------
    r : numpy.ndarray
        r positions generated by histogram binning
    g_r_t : numpy.ndarray
        Van Hove function at each time and position
    """
    n_physical_atoms = len([a for a in trj.top.atoms if a.element.mass > 0])
    unique_elements = list(set([a.element for a in trj.top.atoms if a.element.mass > 0]))

    # Compute the partial van Hove function for every pair of elements
    partial_dict = dict()
    for elem1, elem2 in it.combinations_with_replacement(unique_elements[::-1], 2):
        print('doing {0} and {1} ...'.format(elem1, elem2))
        r, g_r_t_partial = compute_partial_van_hove(trj=trj,
                                                    chunk_length=chunk_length,
                                                    selection1='element {}'.format(elem1.symbol),
                                                    selection2='element {}'.format(elem2.symbol),
                                                    r_range=r_range,
                                                    bin_width=bin_width,
                                                    n_bins=n_bins,
                                                    self_correlation=self_correlation,
                                                    periodic=periodic,
                                                    opt=opt)
        partial_dict[(elem1, elem2)] = g_r_t_partial

    if partial:
        return partial_dict

    # Weight each partial by the element concentrations and form factors
    norm = 0
    g_r_t = None
    for key, val in partial_dict.items():
        elem1, elem2 = key
        concentration1 = trj.atom_slice(trj.top.select('element {}'.format(elem1.symbol))).n_atoms / n_physical_atoms
        concentration2 = trj.atom_slice(trj.top.select('element {}'.format(elem2.symbol))).n_atoms / n_physical_atoms
        form_factor1 = get_form_factor(element_name=elem1.symbol, water=water)
        form_factor2 = get_form_factor(element_name=elem2.symbol, water=water)

        coeff = form_factor1 * concentration1 * form_factor2 * concentration2

        if g_r_t is None:
            g_r_t = np.zeros_like(val)
        g_r_t += val * coeff
        norm += coeff

    # Reshape g_r_t to better represent the discretization in both r and t
    g_r_t_final = np.empty(shape=(chunk_length, len(r)))
    for i in range(chunk_length):
        g_r_t_final[i, :] = np.mean(g_r_t[i::chunk_length], axis=0)
    g_r_t_final /= norm

    t = trj.time[:chunk_length]

    return r, t, g_r_t_final


def compute_partial_van_hove(trj, chunk_length=10, selection1=None, selection2=None,
                             r_range=(0, 1.0), bin_width=0.005, n_bins=200,
                             self_correlation=True, periodic=True, opt=True):
    """Compute the partial van Hove function of a trajectory.

    Parameters
    ----------
    trj : mdtraj.Trajectory
        trajectory on which to compute the Van Hove function
    chunk_length : int
        length of time between restarting averaging
    selection1 : str
        selection to be considered, in the style of MDTraj atom selection
    selection2 : str
        selection to be considered, in the style of MDTraj atom selection
    r_range : array-like, shape=(2,), optional, default=(0.0, 1.0)
        Minimum and maximum radii.
    bin_width : float, optional, default=0.005
        Width of the bins in nanometers.
    n_bins : int, optional, default=200
        The number of bins. If specified, this will override the
        `bin_width` parameter.
    self_correlation : bool, default=True
        Whether or not to include the self-self correlations

    Returns
    -------
    r : numpy.ndarray
        r positions generated by histogram binning
    g_r_t : numpy.ndarray
        Van Hove function at each time and position
    """
    unique_elements = (
        set([a.element for a in trj.atom_slice(trj.top.select(selection1)).top.atoms]),
        set([a.element for a in trj.atom_slice(trj.top.select(selection2)).top.atoms]),
    )

    if any([len(val) > 1 for val in unique_elements]):
        raise UserWarning(
            'Multiple elements found in a selection(s). Results may not be '
            'directly comparable to scattering experiments.'
        )

    # Don't need to store it, but this serves to check that dt is constant
    dt = get_dt(trj)

    pairs = trj.top.select_pairs(selection1=selection1, selection2=selection2)

    n_chunks = int(trj.n_frames / chunk_length)

    g_r_t = None
    pbar = ProgressBar()
    for i in pbar(range(n_chunks)):
        times = list()
        for j in range(chunk_length):
            times.append([chunk_length * i, chunk_length * i + j])
        r, g_r_t_frame = md.compute_rdf_t(
            traj=trj,
            pairs=pairs,
            times=times,
            r_range=r_range,
            bin_width=bin_width,
            n_bins=n_bins,
            period_length=chunk_length,
            self_correlation=self_correlation,
            periodic=periodic,
            opt=opt,
        )
        if g_r_t is None:
            g_r_t = np.zeros_like(g_r_t_frame)
        g_r_t += g_r_t_frame

    return r, g_r_t
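A hedged usage sketch for the two functions above. The trajectory file names and the module path scattering.van_hove are assumptions for illustration only; any MDTraj trajectory whose topology carries element information should work.

# Hypothetical usage sketch (placeholder file names, assumed module path):
import mdtraj as md

from scattering.van_hove import compute_van_hove, compute_partial_van_hove

trj = md.load('trajectory.xtc', top='topology.pdb')  # placeholder paths

# Total van Hove function, averaged over 200-frame chunks
r, t, g_r_t = compute_van_hove(trj, chunk_length=200)

# A single element-element partial, e.g. oxygen-oxygen
r, g_r_t_OO = compute_partial_van_hove(trj, chunk_length=200,
                                       selection1='element O',
                                       selection2='element O')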
# -*- coding: utf-8 -*-
# nn_benchmark
# author - <NAME>
# https://github.com/QDucasse
# <EMAIL>

from __future__ import absolute_import

__all__ = ["lenet", "lenet5", "quant_lenet5", "quant_cnv", "quant_tfc",
           "mobilenetv1", "quant_mobilenetv1",
           "vggnet", "quant_vggnet",
           "common", "alexnet", "quant_alexnet"]

from .alexnet import *
from .lenet import *
from .lenet5 import *
from .mobilenetv1 import *
from .quant_mobilenetv1 import *
from .quant_alexnet import *
from .quant_lenet5 import *
from .quant_cnv import *
from .quant_tfc import *
from .vggnet import *
from .quant_vggnet import *
from .common import *
[ "cv.imshow('Gray', gray) # blur = cv.GaussianBlur(gray, (5,5), cv.BORDER_DEFAULT) cv.imshow('Blur', blur)", "cv.COLOR_BGR2GRAY) cv.imshow('Gray', gray) # blur = cv.GaussianBlur(gray, (5,5), cv.BORDER_DEFAULT) cv.imshow('Blur',", "canny = cv.Canny(blur, 125, 175) cv.imshow('Canny Edges', canny) # ret,", "import cv2 as cv import numpy as np img =", "blur) canny = cv.Canny(blur, 125, 175) cv.imshow('Canny Edges', canny) #", "175) cv.imshow('Canny Edges', canny) # ret, thresh = cv.threshold(gray, 125,", "found!') # cv.drawContours(blank, contours, -1, (200,120,100), 1) cv.imshow('Contours Drawn', blank)", "= np.zeros(img.shape[:2], dtype='uint8') cv.imshow('Blank', blank) gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) cv.imshow('Gray',", "import numpy as np img = cv.imread('/Users/webileapp/Desktop/niharika_files/projects/opencv_course_master/Resources/Photos/cats.jpg') cv.imshow('Cats', img) #", "cv.imshow('Blur', blur) canny = cv.Canny(blur, 125, 175) cv.imshow('Canny Edges', canny)", "contour(s) found!') # cv.drawContours(blank, contours, -1, (200,120,100), 1) cv.imshow('Contours Drawn',", "# blank = np.zeros(img.shape[:2], dtype='uint8') cv.imshow('Blank', blank) gray = cv.cvtColor(img,", "as cv import numpy as np img = cv.imread('/Users/webileapp/Desktop/niharika_files/projects/opencv_course_master/Resources/Photos/cats.jpg') cv.imshow('Cats',", "cv.imread('/Users/webileapp/Desktop/niharika_files/projects/opencv_course_master/Resources/Photos/cats.jpg') cv.imshow('Cats', img) # blank = np.zeros(img.shape[:2], dtype='uint8') cv.imshow('Blank', blank)", "cv.BORDER_DEFAULT) cv.imshow('Blur', blur) canny = cv.Canny(blur, 125, 175) cv.imshow('Canny Edges',", "cv.imshow('Blank', blank) gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) cv.imshow('Gray', gray) # blur", "thresh = cv.threshold(gray, 125, 255, cv.THRESH_BINARY) cv.imshow('Thresh', thresh) # contours,", "np.zeros(img.shape[:2], dtype='uint8') cv.imshow('Blank', blank) gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) cv.imshow('Gray', gray)", "cv.CHAIN_APPROX_SIMPLE) print(f'{len(contours)} contour(s) found!') # cv.drawContours(blank, contours, -1, (200,120,100), 1)", "contours, hierarchies = cv.findContours(canny, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE) print(f'{len(contours)} contour(s) found!') #", "cv import numpy as np img = cv.imread('/Users/webileapp/Desktop/niharika_files/projects/opencv_course_master/Resources/Photos/cats.jpg') cv.imshow('Cats', img)", "canny) # ret, thresh = cv.threshold(gray, 125, 255, cv.THRESH_BINARY) cv.imshow('Thresh',", "cv.THRESH_BINARY) cv.imshow('Thresh', thresh) # contours, hierarchies = cv.findContours(canny, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)", "= cv.cvtColor(img, cv.COLOR_BGR2GRAY) cv.imshow('Gray', gray) # blur = cv.GaussianBlur(gray, (5,5),", "thresh) # contours, hierarchies = cv.findContours(canny, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE) print(f'{len(contours)} contour(s)", "np img = cv.imread('/Users/webileapp/Desktop/niharika_files/projects/opencv_course_master/Resources/Photos/cats.jpg') cv.imshow('Cats', img) # blank = np.zeros(img.shape[:2],", "= cv.Canny(blur, 125, 175) cv.imshow('Canny Edges', canny) # ret, thresh", "255, cv.THRESH_BINARY) cv.imshow('Thresh', thresh) # contours, hierarchies = cv.findContours(canny, cv.RETR_LIST,", "= cv.findContours(canny, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE) print(f'{len(contours)} contour(s) found!') # cv.drawContours(blank, contours,", "#pylint:disable=no-member import cv2 as cv import numpy as np img", "125, 255, cv.THRESH_BINARY) cv.imshow('Thresh', thresh) # 
contours, hierarchies = cv.findContours(canny,", "cv.cvtColor(img, cv.COLOR_BGR2GRAY) cv.imshow('Gray', gray) # blur = cv.GaussianBlur(gray, (5,5), cv.BORDER_DEFAULT)", "ret, thresh = cv.threshold(gray, 125, 255, cv.THRESH_BINARY) cv.imshow('Thresh', thresh) #", "125, 175) cv.imshow('Canny Edges', canny) # ret, thresh = cv.threshold(gray,", "cv.findContours(canny, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE) print(f'{len(contours)} contour(s) found!') # cv.drawContours(blank, contours, -1,", "print(f'{len(contours)} contour(s) found!') # cv.drawContours(blank, contours, -1, (200,120,100), 1) cv.imshow('Contours", "blur = cv.GaussianBlur(gray, (5,5), cv.BORDER_DEFAULT) cv.imshow('Blur', blur) canny = cv.Canny(blur,", "cv2 as cv import numpy as np img = cv.imread('/Users/webileapp/Desktop/niharika_files/projects/opencv_course_master/Resources/Photos/cats.jpg')", "# ret, thresh = cv.threshold(gray, 125, 255, cv.THRESH_BINARY) cv.imshow('Thresh', thresh)", "dtype='uint8') cv.imshow('Blank', blank) gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) cv.imshow('Gray', gray) #", "cv.imshow('Thresh', thresh) # contours, hierarchies = cv.findContours(canny, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE) print(f'{len(contours)}", "blank) gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) cv.imshow('Gray', gray) # blur =", "img) # blank = np.zeros(img.shape[:2], dtype='uint8') cv.imshow('Blank', blank) gray =", "Edges', canny) # ret, thresh = cv.threshold(gray, 125, 255, cv.THRESH_BINARY)", "gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) cv.imshow('Gray', gray) # blur = cv.GaussianBlur(gray,", "cv.threshold(gray, 125, 255, cv.THRESH_BINARY) cv.imshow('Thresh', thresh) # contours, hierarchies =", "= cv.imread('/Users/webileapp/Desktop/niharika_files/projects/opencv_course_master/Resources/Photos/cats.jpg') cv.imshow('Cats', img) # blank = np.zeros(img.shape[:2], dtype='uint8') cv.imshow('Blank',", "img = cv.imread('/Users/webileapp/Desktop/niharika_files/projects/opencv_course_master/Resources/Photos/cats.jpg') cv.imshow('Cats', img) # blank = np.zeros(img.shape[:2], dtype='uint8')", "cv.imshow('Cats', img) # blank = np.zeros(img.shape[:2], dtype='uint8') cv.imshow('Blank', blank) gray", "(5,5), cv.BORDER_DEFAULT) cv.imshow('Blur', blur) canny = cv.Canny(blur, 125, 175) cv.imshow('Canny", "as np img = cv.imread('/Users/webileapp/Desktop/niharika_files/projects/opencv_course_master/Resources/Photos/cats.jpg') cv.imshow('Cats', img) # blank =", "# contours, hierarchies = cv.findContours(canny, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE) print(f'{len(contours)} contour(s) found!')", "hierarchies = cv.findContours(canny, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE) print(f'{len(contours)} contour(s) found!') # cv.drawContours(blank,", "# blur = cv.GaussianBlur(gray, (5,5), cv.BORDER_DEFAULT) cv.imshow('Blur', blur) canny =", "cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE) print(f'{len(contours)} contour(s) found!') # cv.drawContours(blank, contours, -1, (200,120,100),", "numpy as np img = cv.imread('/Users/webileapp/Desktop/niharika_files/projects/opencv_course_master/Resources/Photos/cats.jpg') cv.imshow('Cats', img) # blank", "# cv.drawContours(blank, contours, -1, (200,120,100), 1) cv.imshow('Contours Drawn', blank) cv.waitKey(0)", "= cv.GaussianBlur(gray, (5,5), cv.BORDER_DEFAULT) cv.imshow('Blur', blur) canny = cv.Canny(blur, 125,", "= cv.threshold(gray, 125, 255, cv.THRESH_BINARY) cv.imshow('Thresh', thresh) # contours, hierarchies", "cv.GaussianBlur(gray, (5,5), cv.BORDER_DEFAULT) cv.imshow('Blur', blur) canny = cv.Canny(blur, 125, 175)", "cv.Canny(blur, 
125, 175) cv.imshow('Canny Edges', canny) # ret, thresh =", "cv.imshow('Canny Edges', canny) # ret, thresh = cv.threshold(gray, 125, 255,", "gray) # blur = cv.GaussianBlur(gray, (5,5), cv.BORDER_DEFAULT) cv.imshow('Blur', blur) canny", "blank = np.zeros(img.shape[:2], dtype='uint8') cv.imshow('Blank', blank) gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)" ]
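# Hedged sketch, not part of the original script: cv.imread() above is given an
# absolute path that only exists on the author's machine. A small guard like the
# one below reads the image path from the command line instead (the default path
# is illustrative) and fails loudly when the file cannot be read.
import sys
import cv2 as cv

img_path = sys.argv[1] if len(sys.argv) > 1 else 'Resources/Photos/cats.jpg'
img = cv.imread(img_path)
if img is None:
    raise SystemExit(f'Could not read image: {img_path}')
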
[ "import Document class Practitioner(Document): def before_save(self): self.practitioner_full_name = f'{self.first_name} {self.second_name", "license.txt # import frappe from frappe.model.document import Document class Practitioner(Document):", "and contributors # For license information, please see license.txt #", "information, please see license.txt # import frappe from frappe.model.document import", "Juve and contributors # For license information, please see license.txt", "class Practitioner(Document): def before_save(self): self.practitioner_full_name = f'{self.first_name} {self.second_name or \"\"}'", "# Copyright (c) 2022, Juve and contributors # For license", "Document class Practitioner(Document): def before_save(self): self.practitioner_full_name = f'{self.first_name} {self.second_name or", "license information, please see license.txt # import frappe from frappe.model.document", "contributors # For license information, please see license.txt # import", "For license information, please see license.txt # import frappe from", "# For license information, please see license.txt # import frappe", "(c) 2022, Juve and contributors # For license information, please", "frappe from frappe.model.document import Document class Practitioner(Document): def before_save(self): self.practitioner_full_name", "import frappe from frappe.model.document import Document class Practitioner(Document): def before_save(self):", "see license.txt # import frappe from frappe.model.document import Document class", "frappe.model.document import Document class Practitioner(Document): def before_save(self): self.practitioner_full_name = f'{self.first_name}", "# import frappe from frappe.model.document import Document class Practitioner(Document): def", "please see license.txt # import frappe from frappe.model.document import Document", "Copyright (c) 2022, Juve and contributors # For license information,", "2022, Juve and contributors # For license information, please see", "from frappe.model.document import Document class Practitioner(Document): def before_save(self): self.practitioner_full_name =" ]
[ "+ '/.git/config', 'a+') as gitConfig: if (\"[checkstyle]\" not in gitConfig.read()):", "for pre-commit. Has to run as root, enter password plz\")", "+ \"/.git/hooks/\" + checkstyleName) urlretrieve(checkstyleUrl, basePath + \"/.git/hooks/\" + checkstyleName)", "gitConfig.write(\"jar = %s\\n\" % (basePath + \"/.git/hooks/\" + checkstyleName)) gitConfig.write(\"checkfile", "open(basePath + '/.git/config', 'a+') as gitConfig: if (\"[checkstyle]\" not in", "plz\") call([\"sudo\", \"chmod\", \"+x\", (basePath + \"/.git/hooks/pre-commit\")]) if __name__ ==", "sys from os import path import urllib; from urllib.request import", "basePath + \"/.git/hooks/\" + checkstyleName) print(\"Downloading pre-commit script to %s\"", "import call def install_hooks(directory): checkstyleUrl = 'https://github.com/checkstyle/checkstyle/releases/download/checkstyle-8.36.1/checkstyle-8.36.1-all.jar' preCommitUrl = 'https://gist.githubusercontent.com/EdotJ/d512826d5b4fd3e6cdc285b9236511b2/raw/43e5087ed173fd03aab640b0b3db22f11319c623/pre-commit'", "%s...\" % basePath + \"/.git/hooks/\" + checkstyleName) urlretrieve(checkstyleUrl, basePath +", "to %s...\" % basePath + \"/.git/hooks/\" + checkstyleName) urlretrieve(checkstyleUrl, basePath", "% basePath + \"/.git/hooks/pre-commit\") urlretrieve(preCommitUrl, basePath + \"/.git/hooks/pre-commit\") with open(basePath", "basePath + \"/.git/hooks/pre-commit\") urlretrieve(preCommitUrl, basePath + \"/.git/hooks/pre-commit\") with open(basePath +", "print(\"Enter a directory to install hooks\") else: if (path.exists(sys.argv[1])): install_hooks(sys.argv[1])", "checkstyle to %s...\" % basePath + \"/.git/hooks/\" + checkstyleName) urlretrieve(checkstyleUrl,", "\"/.git/hooks/\" + checkstyleName) urlretrieve(checkstyleUrl, basePath + \"/.git/hooks/\" + checkstyleName) print(\"Downloading", "to run as root, enter password plz\") call([\"sudo\", \"chmod\", \"+x\",", "basePath + \"/.git/hooks/pre-commit\") with open(basePath + '/.git/config', 'a+') as gitConfig:", "enter password plz\") call([\"sudo\", \"chmod\", \"+x\", (basePath + \"/.git/hooks/pre-commit\")]) if", "gitConfig.read()): print(\"Adding git configurations to .git/config\") gitConfig.write(\"[checkstyle]\\n\") gitConfig.write(\"jar = %s\\n\"", "gitConfig.write(\"[checkstyle]\\n\") gitConfig.write(\"jar = %s\\n\" % (basePath + \"/.git/hooks/\" + checkstyleName))", "from urllib.request import urlretrieve from subprocess import call def install_hooks(directory):", "= %s\\n\" % (basePath + \"/.git/hooks/\" + checkstyleName)) gitConfig.write(\"checkfile =", "(basePath + \"/.git/hooks/pre-commit\")]) if __name__ == \"__main__\": if (len(sys.argv) <", "\"__main__\": if (len(sys.argv) < 2): print(\"Enter a directory to install", ".git/config\") gitConfig.write(\"[checkstyle]\\n\") gitConfig.write(\"jar = %s\\n\" % (basePath + \"/.git/hooks/\" +", "not in gitConfig.read()): print(\"Adding git configurations to .git/config\") gitConfig.write(\"[checkstyle]\\n\") gitConfig.write(\"jar", "1] basePath = path.abspath(directory) print(\"Downloading checkstyle to %s...\" % basePath", "__name__ == \"__main__\": if (len(sys.argv) < 2): print(\"Enter a directory", "run as root, enter password plz\") call([\"sudo\", \"chmod\", \"+x\", (basePath", "as gitConfig: if (\"[checkstyle]\" not in gitConfig.read()): print(\"Adding git configurations", "subprocess import call def install_hooks(directory): checkstyleUrl = 'https://github.com/checkstyle/checkstyle/releases/download/checkstyle-8.36.1/checkstyle-8.36.1-all.jar' preCommitUrl =", "+ 
checkstyleName) urlretrieve(checkstyleUrl, basePath + \"/.git/hooks/\" + checkstyleName) print(\"Downloading pre-commit", "from subprocess import call def install_hooks(directory): checkstyleUrl = 'https://github.com/checkstyle/checkstyle/releases/download/checkstyle-8.36.1/checkstyle-8.36.1-all.jar' preCommitUrl", "root, enter password plz\") call([\"sudo\", \"chmod\", \"+x\", (basePath + \"/.git/hooks/pre-commit\")])", "% (basePath + \"/.git/hooks/\" + checkstyleName)) gitConfig.write(\"checkfile = %s\\n\" %", "+ \"/.git/hooks/pre-commit\") urlretrieve(preCommitUrl, basePath + \"/.git/hooks/pre-commit\") with open(basePath + '/.git/config',", "'https://github.com/checkstyle/checkstyle/releases/download/checkstyle-8.36.1/checkstyle-8.36.1-all.jar' preCommitUrl = 'https://gist.githubusercontent.com/EdotJ/d512826d5b4fd3e6cdc285b9236511b2/raw/43e5087ed173fd03aab640b0b3db22f11319c623/pre-commit' checkstyleName = checkstyleUrl.split('/')[len(checkstyleUrl.split('/')) - 1] basePath", "+ checkstyleName)) gitConfig.write(\"checkfile = %s\\n\" % (basePath + \"/checkstyle_config.xml\")) print(\"Changing", "path.abspath(directory) print(\"Downloading checkstyle to %s...\" % basePath + \"/.git/hooks/\" +", "'/.git/config', 'a+') as gitConfig: if (\"[checkstyle]\" not in gitConfig.read()): print(\"Adding", "= 'https://gist.githubusercontent.com/EdotJ/d512826d5b4fd3e6cdc285b9236511b2/raw/43e5087ed173fd03aab640b0b3db22f11319c623/pre-commit' checkstyleName = checkstyleUrl.split('/')[len(checkstyleUrl.split('/')) - 1] basePath = path.abspath(directory)", "gitConfig.write(\"checkfile = %s\\n\" % (basePath + \"/checkstyle_config.xml\")) print(\"Changing permissions for", "call([\"sudo\", \"chmod\", \"+x\", (basePath + \"/.git/hooks/pre-commit\")]) if __name__ == \"__main__\":", "path import urllib; from urllib.request import urlretrieve from subprocess import", "urlretrieve from subprocess import call def install_hooks(directory): checkstyleUrl = 'https://github.com/checkstyle/checkstyle/releases/download/checkstyle-8.36.1/checkstyle-8.36.1-all.jar'", "print(\"Downloading pre-commit script to %s\" % basePath + \"/.git/hooks/pre-commit\") urlretrieve(preCommitUrl,", "(basePath + \"/checkstyle_config.xml\")) print(\"Changing permissions for pre-commit. Has to run", "%s\\n\" % (basePath + \"/.git/hooks/\" + checkstyleName)) gitConfig.write(\"checkfile = %s\\n\"", "def install_hooks(directory): checkstyleUrl = 'https://github.com/checkstyle/checkstyle/releases/download/checkstyle-8.36.1/checkstyle-8.36.1-all.jar' preCommitUrl = 'https://gist.githubusercontent.com/EdotJ/d512826d5b4fd3e6cdc285b9236511b2/raw/43e5087ed173fd03aab640b0b3db22f11319c623/pre-commit' checkstyleName =", "call def install_hooks(directory): checkstyleUrl = 'https://github.com/checkstyle/checkstyle/releases/download/checkstyle-8.36.1/checkstyle-8.36.1-all.jar' preCommitUrl = 'https://gist.githubusercontent.com/EdotJ/d512826d5b4fd3e6cdc285b9236511b2/raw/43e5087ed173fd03aab640b0b3db22f11319c623/pre-commit' checkstyleName", "% basePath + \"/.git/hooks/\" + checkstyleName) urlretrieve(checkstyleUrl, basePath + \"/.git/hooks/\"", "checkstyleUrl = 'https://github.com/checkstyle/checkstyle/releases/download/checkstyle-8.36.1/checkstyle-8.36.1-all.jar' preCommitUrl = 'https://gist.githubusercontent.com/EdotJ/d512826d5b4fd3e6cdc285b9236511b2/raw/43e5087ed173fd03aab640b0b3db22f11319c623/pre-commit' checkstyleName = checkstyleUrl.split('/')[len(checkstyleUrl.split('/')) -", "print(\"Changing permissions for pre-commit. 
Has to run as root, enter", "= %s\\n\" % (basePath + \"/checkstyle_config.xml\")) print(\"Changing permissions for pre-commit.", "+ \"/.git/hooks/pre-commit\")]) if __name__ == \"__main__\": if (len(sys.argv) < 2):", "+ \"/checkstyle_config.xml\")) print(\"Changing permissions for pre-commit. Has to run as", "urlretrieve(checkstyleUrl, basePath + \"/.git/hooks/\" + checkstyleName) print(\"Downloading pre-commit script to", "= path.abspath(directory) print(\"Downloading checkstyle to %s...\" % basePath + \"/.git/hooks/\"", "%s\\n\" % (basePath + \"/checkstyle_config.xml\")) print(\"Changing permissions for pre-commit. Has", "(basePath + \"/.git/hooks/\" + checkstyleName)) gitConfig.write(\"checkfile = %s\\n\" % (basePath", "pre-commit. Has to run as root, enter password plz\") call([\"sudo\",", "checkstyleName) print(\"Downloading pre-commit script to %s\" % basePath + \"/.git/hooks/pre-commit\")", "\"chmod\", \"+x\", (basePath + \"/.git/hooks/pre-commit\")]) if __name__ == \"__main__\": if", "urlretrieve(preCommitUrl, basePath + \"/.git/hooks/pre-commit\") with open(basePath + '/.git/config', 'a+') as", "in gitConfig.read()): print(\"Adding git configurations to .git/config\") gitConfig.write(\"[checkstyle]\\n\") gitConfig.write(\"jar =", "basePath = path.abspath(directory) print(\"Downloading checkstyle to %s...\" % basePath +", "'a+') as gitConfig: if (\"[checkstyle]\" not in gitConfig.read()): print(\"Adding git", "2): print(\"Enter a directory to install hooks\") else: if (path.exists(sys.argv[1])):", "<reponame>JustasGau/DonjinKrawler import sys from os import path import urllib; from", "checkstyleName)) gitConfig.write(\"checkfile = %s\\n\" % (basePath + \"/checkstyle_config.xml\")) print(\"Changing permissions", "< 2): print(\"Enter a directory to install hooks\") else: if", "import sys from os import path import urllib; from urllib.request", "to %s\" % basePath + \"/.git/hooks/pre-commit\") urlretrieve(preCommitUrl, basePath + \"/.git/hooks/pre-commit\")", "urllib; from urllib.request import urlretrieve from subprocess import call def", "import urllib; from urllib.request import urlretrieve from subprocess import call", "\"/.git/hooks/\" + checkstyleName) print(\"Downloading pre-commit script to %s\" % basePath", "%s\" % basePath + \"/.git/hooks/pre-commit\") urlretrieve(preCommitUrl, basePath + \"/.git/hooks/pre-commit\") with", "% (basePath + \"/checkstyle_config.xml\")) print(\"Changing permissions for pre-commit. Has to", "= checkstyleUrl.split('/')[len(checkstyleUrl.split('/')) - 1] basePath = path.abspath(directory) print(\"Downloading checkstyle to", "\"/checkstyle_config.xml\")) print(\"Changing permissions for pre-commit. 
Has to run as root,", "password plz\") call([\"sudo\", \"chmod\", \"+x\", (basePath + \"/.git/hooks/pre-commit\")]) if __name__", "= 'https://github.com/checkstyle/checkstyle/releases/download/checkstyle-8.36.1/checkstyle-8.36.1-all.jar' preCommitUrl = 'https://gist.githubusercontent.com/EdotJ/d512826d5b4fd3e6cdc285b9236511b2/raw/43e5087ed173fd03aab640b0b3db22f11319c623/pre-commit' checkstyleName = checkstyleUrl.split('/')[len(checkstyleUrl.split('/')) - 1]", "(len(sys.argv) < 2): print(\"Enter a directory to install hooks\") else:", "as root, enter password plz\") call([\"sudo\", \"chmod\", \"+x\", (basePath +", "(\"[checkstyle]\" not in gitConfig.read()): print(\"Adding git configurations to .git/config\") gitConfig.write(\"[checkstyle]\\n\")", "script to %s\" % basePath + \"/.git/hooks/pre-commit\") urlretrieve(preCommitUrl, basePath +", "install_hooks(directory): checkstyleUrl = 'https://github.com/checkstyle/checkstyle/releases/download/checkstyle-8.36.1/checkstyle-8.36.1-all.jar' preCommitUrl = 'https://gist.githubusercontent.com/EdotJ/d512826d5b4fd3e6cdc285b9236511b2/raw/43e5087ed173fd03aab640b0b3db22f11319c623/pre-commit' checkstyleName = checkstyleUrl.split('/')[len(checkstyleUrl.split('/'))", "pre-commit script to %s\" % basePath + \"/.git/hooks/pre-commit\") urlretrieve(preCommitUrl, basePath", "configurations to .git/config\") gitConfig.write(\"[checkstyle]\\n\") gitConfig.write(\"jar = %s\\n\" % (basePath +", "\"/.git/hooks/\" + checkstyleName)) gitConfig.write(\"checkfile = %s\\n\" % (basePath + \"/checkstyle_config.xml\"))", "preCommitUrl = 'https://gist.githubusercontent.com/EdotJ/d512826d5b4fd3e6cdc285b9236511b2/raw/43e5087ed173fd03aab640b0b3db22f11319c623/pre-commit' checkstyleName = checkstyleUrl.split('/')[len(checkstyleUrl.split('/')) - 1] basePath =", "\"/.git/hooks/pre-commit\")]) if __name__ == \"__main__\": if (len(sys.argv) < 2): print(\"Enter", "basePath + \"/.git/hooks/\" + checkstyleName) urlretrieve(checkstyleUrl, basePath + \"/.git/hooks/\" +", "os import path import urllib; from urllib.request import urlretrieve from", "if (len(sys.argv) < 2): print(\"Enter a directory to install hooks\")", "checkstyleName = checkstyleUrl.split('/')[len(checkstyleUrl.split('/')) - 1] basePath = path.abspath(directory) print(\"Downloading checkstyle", "import path import urllib; from urllib.request import urlretrieve from subprocess", "- 1] basePath = path.abspath(directory) print(\"Downloading checkstyle to %s...\" %", "\"/.git/hooks/pre-commit\") with open(basePath + '/.git/config', 'a+') as gitConfig: if (\"[checkstyle]\"", "urllib.request import urlretrieve from subprocess import call def install_hooks(directory): checkstyleUrl", "with open(basePath + '/.git/config', 'a+') as gitConfig: if (\"[checkstyle]\" not", "if __name__ == \"__main__\": if (len(sys.argv) < 2): print(\"Enter a", "gitConfig: if (\"[checkstyle]\" not in gitConfig.read()): print(\"Adding git configurations to", "Has to run as root, enter password plz\") call([\"sudo\", \"chmod\",", "import urlretrieve from subprocess import call def install_hooks(directory): checkstyleUrl =", "\"/.git/hooks/pre-commit\") urlretrieve(preCommitUrl, basePath + \"/.git/hooks/pre-commit\") with open(basePath + '/.git/config', 'a+')", "checkstyleUrl.split('/')[len(checkstyleUrl.split('/')) - 1] basePath = path.abspath(directory) print(\"Downloading checkstyle to %s...\"", "from os import path import urllib; from urllib.request import urlretrieve", "to .git/config\") gitConfig.write(\"[checkstyle]\\n\") gitConfig.write(\"jar = 
%s\\n\" % (basePath + \"/.git/hooks/\"", "+ checkstyleName) print(\"Downloading pre-commit script to %s\" % basePath +", "checkstyleName) urlretrieve(checkstyleUrl, basePath + \"/.git/hooks/\" + checkstyleName) print(\"Downloading pre-commit script", "git configurations to .git/config\") gitConfig.write(\"[checkstyle]\\n\") gitConfig.write(\"jar = %s\\n\" % (basePath", "\"+x\", (basePath + \"/.git/hooks/pre-commit\")]) if __name__ == \"__main__\": if (len(sys.argv)", "if (\"[checkstyle]\" not in gitConfig.read()): print(\"Adding git configurations to .git/config\")", "+ \"/.git/hooks/\" + checkstyleName) print(\"Downloading pre-commit script to %s\" %", "print(\"Adding git configurations to .git/config\") gitConfig.write(\"[checkstyle]\\n\") gitConfig.write(\"jar = %s\\n\" %", "+ \"/.git/hooks/pre-commit\") with open(basePath + '/.git/config', 'a+') as gitConfig: if", "== \"__main__\": if (len(sys.argv) < 2): print(\"Enter a directory to", "+ \"/.git/hooks/\" + checkstyleName)) gitConfig.write(\"checkfile = %s\\n\" % (basePath +", "'https://gist.githubusercontent.com/EdotJ/d512826d5b4fd3e6cdc285b9236511b2/raw/43e5087ed173fd03aab640b0b3db22f11319c623/pre-commit' checkstyleName = checkstyleUrl.split('/')[len(checkstyleUrl.split('/')) - 1] basePath = path.abspath(directory) print(\"Downloading", "print(\"Downloading checkstyle to %s...\" % basePath + \"/.git/hooks/\" + checkstyleName)", "permissions for pre-commit. Has to run as root, enter password" ]
[ "= {} # Set up the paths and environment for", "for static pages. It is the default handler.\"\"\" # Build", "the file then return an error to the client. try:", "file_type data = open(file_path, 'rb').read() self.response.body_file.write(data) except Exception as e:", "based on WebOb, Jinja2, WSGI with a simple router \"\"\"", "handled.\"\"\" # Create the WebOb Request and Response objects for", "environment so it can find and render pages.\"\"\" if self.static_root", "= open(file_path, 'rb').read() self.response.body_file.write(data) except Exception as e: self.response.status =", "a simple router \"\"\" import os import hmac import hashlib", "hashlib import mimetypes from wsgiref.simple_server import WSGIServer, WSGIRequestHandler from webob", "webapp based on WebOb, Jinja2, WSGI with a simple router", "pages.\"\"\" if self.static_root is None: self.static_root = 'static' if self.templates_root", "what port to listen on\"\"\" # Create the WSGI HTTP", "handler. if handler: handler() else: self.static() return self.response(environ, start_response) def", "Jinja2, WSGI with a simple router \"\"\" import os import", "handler.\"\"\" # Build a file path using either the resource", "is None: self.routes = {} # Set up the paths", "objects for # used to read the request and write", "WSGI HTTP server. Set the port it should listen on.", "except Exception as e: self.response.status = 404 self.response.write(str(e)) def render_template(self,", "def render_template(self, template_name, template_values={}): \"\"\"Renders Jinja2 templates into HTML\"\"\" #", "Jinja. This is how it finds the templates. self.template_path =", "the HTTPServer when there is a request to be handled.\"\"\"", "HTTP server. Set the port it should listen on. #", "to HTML # then write it to the response object", "directory the server is running it as save the path.", "class and configures the paths and the Jinja2 environment so", "WSGIRequestHandler from webob import Request from webob import Response from", "# If we can't find the file then return an", "= mimetypes.guess_type(file_path)[0] self.response.content_type = file_type data = open(file_path, 'rb').read() self.response.body_file.write(data)", "self.static_root, resource) else: file_path = os.path.join(self.current_dir, self.static_root, self.request.path_info[1:]) print(\"File path:\",", "class MicroServer(object): \"\"\"Small web server.\"\"\" def __init__(self): \"\"\"Initializes the class", "is None: self.static_root = 'static' if self.templates_root is None: self.templates_root", "start_response): \"\"\"This method is called by the HTTPServer when there", "static handler. if handler: handler() else: self.static() return self.response(environ, start_response)", "if self.templates_root is None: self.templates_root = 'templates' if self.routes is", "find the site's resources. self.current_dir = os.path.dirname(os.path.realpath(__file__)) def __call__(self, environ,", "the WSGI HTTP server. Set the port it should listen", "import hmac import hashlib import mimetypes from wsgiref.simple_server import WSGIServer,", "an error to the client. try: file_type = mimetypes.guess_type(file_path)[0] self.response.content_type", "Environment, FileSystemLoader class MicroServer(object): \"\"\"Small web server.\"\"\" def __init__(self): \"\"\"Initializes", "e: self.response.status = 404 self.response.write(str(e)) def render_template(self, template_name, template_values={}): \"\"\"Renders", "path if there is one. 
handler = self.routes.get(self.request.path_info) # If", "data = open(file_path, 'rb').read() self.response.body_file.write(data) except Exception as e: self.response.status", "self.response.content_type = file_type data = open(file_path, 'rb').read() self.response.body_file.write(data) except Exception", "server. server = WSGIServer(('', 8000), WSGIRequestHandler) server.set_app(self) print(\"Serving on http://localhost:8000/", "self.env = Environment(autoescape=True, loader=FileSystemLoader(self.template_path)) # Figure out what directory the", "def __init__(self): \"\"\"Initializes the class and configures the paths and", "to open the file. If we can then guess its", "pages. It is the default handler.\"\"\" # Build a file", "resource: file_path = os.path.join(self.current_dir, self.static_root, resource) else: file_path = os.path.join(self.current_dir,", "Request and Response objects for # used to read the", "a hash from strings based on a passphrase.\"\"\" cookiehash =", "response object to send it to the client. # If", "path using either the resource parameter or the path in", "return cookiehash.hexdigest() def run(self, port): \"\"\"Starts the HTTP server and", "start_response) def static(self, resource=''): \"\"\"Handles request for static pages. It", "Set up the paths and environment for Jinja. This is", "the port it should listen on. # And start the", "# Find the template and render it to HTML #", "file_type = mimetypes.guess_type(file_path)[0] self.response.content_type = file_type data = open(file_path, 'rb').read()", "and write the response. self.request = Request(environ) self.response = Response()", "render_template(self, template_name, template_values={}): \"\"\"Renders Jinja2 templates into HTML\"\"\" # Find", "= 404 self.response.write(str(e)) def render_template(self, template_name, template_values={}): \"\"\"Renders Jinja2 templates", "self.routes.get(self.request.path_info) # If there is call it. If not call", "Figure out what directory the server is running it as", "a passphrase.\"\"\" cookiehash = hmac.new(passphrase.encode(), digestmod=hashlib.sha1) for part in parts:", "when there is a request to be handled.\"\"\" # Create", "Request from webob import Response from jinja2 import Environment, FileSystemLoader", "html = template.render(template_values) self.response.write(html) def get_signature(self, passphrase, *parts): \"\"\"Creates a", "request for static pages. It is the default handler.\"\"\" #", "running it as save the path. # The path will", "and tells it what port to listen on\"\"\" # Create", "either the resource parameter or the path in the request.", "response object to send it to the client. template =", "= os.path.dirname(os.path.realpath(__file__)) def __call__(self, environ, start_response): \"\"\"This method is called", "'static' if self.templates_root is None: self.templates_root = 'templates' if self.routes", "it. If not call the static handler. if handler: handler()", "os.path.join(os.path.dirname(__file__), self.templates_root) self.env = Environment(autoescape=True, loader=FileSystemLoader(self.template_path)) # Figure out what", "\"\"\"This method is called by the HTTPServer when there is", "os.path.join(self.current_dir, self.static_root, resource) else: file_path = os.path.join(self.current_dir, self.static_root, self.request.path_info[1:]) print(\"File", "open the file. If we can then guess its type", "render it to HTML # then write it to the", "on\"\"\" # Create the WSGI HTTP server. 
Set the port", "is the default handler.\"\"\" # Build a file path using", "and render pages.\"\"\" if self.static_root is None: self.static_root = 'static'", "None: self.static_root = 'static' if self.templates_root is None: self.templates_root =", "the path. # The path will be used later to", "= WSGIServer(('', 8000), WSGIRequestHandler) server.set_app(self) print(\"Serving on http://localhost:8000/ ...\") server.serve_forever()", "the class and configures the paths and the Jinja2 environment", "template_values={}): \"\"\"Renders Jinja2 templates into HTML\"\"\" # Find the template", "\"\"\"Small web server.\"\"\" def __init__(self): \"\"\"Initializes the class and configures", "strings based on a passphrase.\"\"\" cookiehash = hmac.new(passphrase.encode(), digestmod=hashlib.sha1) for", "will be used later to find the site's resources. self.current_dir", "be handled.\"\"\" # Create the WebOb Request and Response objects", "a file path using either the resource parameter or the", "simple router \"\"\" import os import hmac import hashlib import", "self.response(environ, start_response) def static(self, resource=''): \"\"\"Handles request for static pages.", "def __call__(self, environ, start_response): \"\"\"This method is called by the", "run(self, port): \"\"\"Starts the HTTP server and tells it what", "for part in parts: cookiehash.update(part.encode()) return cookiehash.hexdigest() def run(self, port):", "to listen on\"\"\" # Create the WSGI HTTP server. Set", "webob import Response from jinja2 import Environment, FileSystemLoader class MicroServer(object):", "server. Set the port it should listen on. # And", "{} # Set up the paths and environment for Jinja.", "import hashlib import mimetypes from wsgiref.simple_server import WSGIServer, WSGIRequestHandler from", "or the path in the request. if resource: file_path =", "is None: self.templates_root = 'templates' if self.routes is None: self.routes", "def run(self, port): \"\"\"Starts the HTTP server and tells it", "the response object to send it to the client. template", "should listen on. # And start the server. server =", "'templates' if self.routes is None: self.routes = {} # Set", "the WebOb Request and Response objects for # used to", "the response object to send it to the client. #", "If there is call it. If not call the static", "to send it to the client. template = self.env.get_template(template_name) html", "guess its type and write its # content to the", "for # used to read the request and write the", "# Find a handler for the path if there is", "*parts): \"\"\"Creates a hash from strings based on a passphrase.\"\"\"", "HTML\"\"\" # Find the template and render it to HTML", "self.static_root = 'static' if self.templates_root is None: self.templates_root = 'templates'", "Build a file path using either the resource parameter or", "self.response.write(str(e)) def render_template(self, template_name, template_values={}): \"\"\"Renders Jinja2 templates into HTML\"\"\"", "as e: self.response.status = 404 self.response.write(str(e)) def render_template(self, template_name, template_values={}):", "path in the request. if resource: file_path = os.path.join(self.current_dir, self.static_root,", "# used to read the request and write the response.", "= Request(environ) self.response = Response() # Find a handler for", "\"\"\" import os import hmac import hashlib import mimetypes from", "Response from jinja2 import Environment, FileSystemLoader class MicroServer(object): \"\"\"Small web", "there is one. 
handler = self.routes.get(self.request.path_info) # If there is", "the site's resources. self.current_dir = os.path.dirname(os.path.realpath(__file__)) def __call__(self, environ, start_response):", "= self.routes.get(self.request.path_info) # If there is call it. If not", "its # content to the response object to send it", "and configures the paths and the Jinja2 environment so it", "mimetypes.guess_type(file_path)[0] self.response.content_type = file_type data = open(file_path, 'rb').read() self.response.body_file.write(data) except", "Create the WebOb Request and Response objects for # used", "self.env.get_template(template_name) html = template.render(template_values) self.response.write(html) def get_signature(self, passphrase, *parts): \"\"\"Creates", "return self.response(environ, start_response) def static(self, resource=''): \"\"\"Handles request for static", "a handler for the path if there is one. handler", "the request. if resource: file_path = os.path.join(self.current_dir, self.static_root, resource) else:", "for the path if there is one. handler = self.routes.get(self.request.path_info)", "self.request.path_info[1:]) print(\"File path:\", file_path) # Try to open the file.", "\"\"\"Initializes the class and configures the paths and the Jinja2", "called by the HTTPServer when there is a request to", "request and write the response. self.request = Request(environ) self.response =", "then guess its type and write its # content to", "mimetypes from wsgiref.simple_server import WSGIServer, WSGIRequestHandler from webob import Request", "self.routes = {} # Set up the paths and environment", "else: file_path = os.path.join(self.current_dir, self.static_root, self.request.path_info[1:]) print(\"File path:\", file_path) #", "server is running it as save the path. # The", "self.response.status = 404 self.response.write(str(e)) def render_template(self, template_name, template_values={}): \"\"\"Renders Jinja2", "WSGI with a simple router \"\"\" import os import hmac", "get_signature(self, passphrase, *parts): \"\"\"Creates a hash from strings based on", "def static(self, resource=''): \"\"\"Handles request for static pages. It is", "the response. self.request = Request(environ) self.response = Response() # Find", "file_path = os.path.join(self.current_dir, self.static_root, resource) else: file_path = os.path.join(self.current_dir, self.static_root,", "= os.path.join(self.current_dir, self.static_root, resource) else: file_path = os.path.join(self.current_dir, self.static_root, self.request.path_info[1:])", "# content to the response object to send it to", "is a request to be handled.\"\"\" # Create the WebOb", "self.current_dir = os.path.dirname(os.path.realpath(__file__)) def __call__(self, environ, start_response): \"\"\"This method is", "and write its # content to the response object to", "server and tells it what port to listen on\"\"\" #", "the path in the request. if resource: file_path = os.path.join(self.current_dir,", "self.static() return self.response(environ, start_response) def static(self, resource=''): \"\"\"Handles request for", "the client. # If we can't find the file then", "the client. try: file_type = mimetypes.guess_type(file_path)[0] self.response.content_type = file_type data", "the server is running it as save the path. #", "port it should listen on. 
# And start the server.", "and the Jinja2 environment so it can find and render", "self.routes is None: self.routes = {} # Set up the", "It is the default handler.\"\"\" # Build a file path", "object to send it to the client. # If we", "start the server. server = WSGIServer(('', 8000), WSGIRequestHandler) server.set_app(self) print(\"Serving", "to read the request and write the response. self.request =", "handler() else: self.static() return self.response(environ, start_response) def static(self, resource=''): \"\"\"Handles", "how it finds the templates. self.template_path = os.path.join(os.path.dirname(__file__), self.templates_root) self.env", "port to listen on\"\"\" # Create the WSGI HTTP server.", "self.request = Request(environ) self.response = Response() # Find a handler", "if self.routes is None: self.routes = {} # Set up", "if resource: file_path = os.path.join(self.current_dir, self.static_root, resource) else: file_path =", "= template.render(template_values) self.response.write(html) def get_signature(self, passphrase, *parts): \"\"\"Creates a hash", "webob import Request from webob import Response from jinja2 import", "from webob import Request from webob import Response from jinja2", "the paths and the Jinja2 environment so it can find", "what directory the server is running it as save the", "os import hmac import hashlib import mimetypes from wsgiref.simple_server import", "method is called by the HTTPServer when there is a", "\"\"\"Handles request for static pages. It is the default handler.\"\"\"", "can't find the file then return an error to the", "If we can then guess its type and write its", "find the file then return an error to the client.", "part in parts: cookiehash.update(part.encode()) return cookiehash.hexdigest() def run(self, port): \"\"\"Starts", "template = self.env.get_template(template_name) html = template.render(template_values) self.response.write(html) def get_signature(self, passphrase,", "client. try: file_type = mimetypes.guess_type(file_path)[0] self.response.content_type = file_type data =", "\"\"\"Starts the HTTP server and tells it what port to", "the templates. self.template_path = os.path.join(os.path.dirname(__file__), self.templates_root) self.env = Environment(autoescape=True, loader=FileSystemLoader(self.template_path))", "= Environment(autoescape=True, loader=FileSystemLoader(self.template_path)) # Figure out what directory the server", "and render it to HTML # then write it to", "Jinja2 templates into HTML\"\"\" # Find the template and render", "out what directory the server is running it as save", "environ, start_response): \"\"\"This method is called by the HTTPServer when", "call it. If not call the static handler. if handler:", "send it to the client. # If we can't find", "passphrase.\"\"\" cookiehash = hmac.new(passphrase.encode(), digestmod=hashlib.sha1) for part in parts: cookiehash.update(part.encode())", "write the response. self.request = Request(environ) self.response = Response() #", "import os import hmac import hashlib import mimetypes from wsgiref.simple_server", "import Request from webob import Response from jinja2 import Environment,", "hmac.new(passphrase.encode(), digestmod=hashlib.sha1) for part in parts: cookiehash.update(part.encode()) return cookiehash.hexdigest() def", "resources. 
self.current_dir = os.path.dirname(os.path.realpath(__file__)) def __call__(self, environ, start_response): \"\"\"This method", "on a passphrase.\"\"\" cookiehash = hmac.new(passphrase.encode(), digestmod=hashlib.sha1) for part in", "Create the WSGI HTTP server. Set the port it should", "= os.path.join(os.path.dirname(__file__), self.templates_root) self.env = Environment(autoescape=True, loader=FileSystemLoader(self.template_path)) # Figure out", "the request and write the response. self.request = Request(environ) self.response", "request to be handled.\"\"\" # Create the WebOb Request and", "be used later to find the site's resources. self.current_dir =", "Response objects for # used to read the request and", "its type and write its # content to the response", "self.response = Response() # Find a handler for the path", "# Figure out what directory the server is running it", "= 'static' if self.templates_root is None: self.templates_root = 'templates' if", "digestmod=hashlib.sha1) for part in parts: cookiehash.update(part.encode()) return cookiehash.hexdigest() def run(self,", "path. # The path will be used later to find", "environment for Jinja. This is how it finds the templates.", "file_path = os.path.join(self.current_dir, self.static_root, self.request.path_info[1:]) print(\"File path:\", file_path) # Try", "to the client. template = self.env.get_template(template_name) html = template.render(template_values) self.response.write(html)", "configures the paths and the Jinja2 environment so it can", "there is a request to be handled.\"\"\" # Create the", "loader=FileSystemLoader(self.template_path)) # Figure out what directory the server is running", "by the HTTPServer when there is a request to be", "HTTP server and tells it what port to listen on\"\"\"", "static(self, resource=''): \"\"\"Handles request for static pages. It is the", "on WebOb, Jinja2, WSGI with a simple router \"\"\" import", "# If there is call it. If not call the", "on. # And start the server. server = WSGIServer(('', 8000),", "from jinja2 import Environment, FileSystemLoader class MicroServer(object): \"\"\"Small web server.\"\"\"", "server = WSGIServer(('', 8000), WSGIRequestHandler) server.set_app(self) print(\"Serving on http://localhost:8000/ ...\")", "= hmac.new(passphrase.encode(), digestmod=hashlib.sha1) for part in parts: cookiehash.update(part.encode()) return cookiehash.hexdigest()", "The path will be used later to find the site's", "self.template_path = os.path.join(os.path.dirname(__file__), self.templates_root) self.env = Environment(autoescape=True, loader=FileSystemLoader(self.template_path)) # Figure", "\"\"\"Creates a hash from strings based on a passphrase.\"\"\" cookiehash", "templates into HTML\"\"\" # Find the template and render it", "it to the response object to send it to the", "render pages.\"\"\" if self.static_root is None: self.static_root = 'static' if", "save the path. # The path will be used later", "it to the client. # If we can't find the", "os.path.join(self.current_dir, self.static_root, self.request.path_info[1:]) print(\"File path:\", file_path) # Try to open", "site's resources. self.current_dir = os.path.dirname(os.path.realpath(__file__)) def __call__(self, environ, start_response): \"\"\"This", "default handler.\"\"\" # Build a file path using either the", "the client. 
template = self.env.get_template(template_name) html = template.render(template_values) self.response.write(html) def", "can find and render pages.\"\"\" if self.static_root is None: self.static_root", "cookiehash = hmac.new(passphrase.encode(), digestmod=hashlib.sha1) for part in parts: cookiehash.update(part.encode()) return", "passphrase, *parts): \"\"\"Creates a hash from strings based on a", "to be handled.\"\"\" # Create the WebOb Request and Response", "is call it. If not call the static handler. if", "port): \"\"\"Starts the HTTP server and tells it what port", "self.static_root is None: self.static_root = 'static' if self.templates_root is None:", "handler: handler() else: self.static() return self.response(environ, start_response) def static(self, resource=''):", "with a simple router \"\"\" import os import hmac import", "templates. self.template_path = os.path.join(os.path.dirname(__file__), self.templates_root) self.env = Environment(autoescape=True, loader=FileSystemLoader(self.template_path)) #", "= file_type data = open(file_path, 'rb').read() self.response.body_file.write(data) except Exception as", "static pages. It is the default handler.\"\"\" # Build a", "parameter or the path in the request. if resource: file_path", "MicroServer(object): \"\"\"Small web server.\"\"\" def __init__(self): \"\"\"Initializes the class and", "self.templates_root is None: self.templates_root = 'templates' if self.routes is None:", "\"\"\" Micro webapp based on WebOb, Jinja2, WSGI with a", "import mimetypes from wsgiref.simple_server import WSGIServer, WSGIRequestHandler from webob import", "in parts: cookiehash.update(part.encode()) return cookiehash.hexdigest() def run(self, port): \"\"\"Starts the", "# The path will be used later to find the", "is called by the HTTPServer when there is a request", "is how it finds the templates. self.template_path = os.path.join(os.path.dirname(__file__), self.templates_root)", "request. if resource: file_path = os.path.join(self.current_dir, self.static_root, resource) else: file_path", "to the client. try: file_type = mimetypes.guess_type(file_path)[0] self.response.content_type = file_type", "Try to open the file. If we can then guess", "hmac import hashlib import mimetypes from wsgiref.simple_server import WSGIServer, WSGIRequestHandler", "it what port to listen on\"\"\" # Create the WSGI", "the file. If we can then guess its type and", "paths and environment for Jinja. This is how it finds", "file. If we can then guess its type and write", "handler = self.routes.get(self.request.path_info) # If there is call it. If", "handler for the path if there is one. handler =", "client. template = self.env.get_template(template_name) html = template.render(template_values) self.response.write(html) def get_signature(self,", "= os.path.join(self.current_dir, self.static_root, self.request.path_info[1:]) print(\"File path:\", file_path) # Try to", "# Set up the paths and environment for Jinja. This", "used later to find the site's resources. self.current_dir = os.path.dirname(os.path.realpath(__file__))", "Response() # Find a handler for the path if there", "template_name, template_values={}): \"\"\"Renders Jinja2 templates into HTML\"\"\" # Find the", "in the request. if resource: file_path = os.path.join(self.current_dir, self.static_root, resource)", "send it to the client. 
template = self.env.get_template(template_name) html =", "WSGIServer, WSGIRequestHandler from webob import Request from webob import Response", "Find a handler for the path if there is one.", "object to send it to the client. template = self.env.get_template(template_name)", "it as save the path. # The path will be", "tells it what port to listen on\"\"\" # Create the", "os.path.dirname(os.path.realpath(__file__)) def __call__(self, environ, start_response): \"\"\"This method is called by", "paths and the Jinja2 environment so it can find and", "it to HTML # then write it to the response", "to find the site's resources. self.current_dir = os.path.dirname(os.path.realpath(__file__)) def __call__(self,", "And start the server. server = WSGIServer(('', 8000), WSGIRequestHandler) server.set_app(self)", "the default handler.\"\"\" # Build a file path using either", "Find the template and render it to HTML # then", "# Create the WSGI HTTP server. Set the port it", "then return an error to the client. try: file_type =", "FileSystemLoader class MicroServer(object): \"\"\"Small web server.\"\"\" def __init__(self): \"\"\"Initializes the", "= 'templates' if self.routes is None: self.routes = {} #", "= Response() # Find a handler for the path if", "based on a passphrase.\"\"\" cookiehash = hmac.new(passphrase.encode(), digestmod=hashlib.sha1) for part", "then write it to the response object to send it", "Micro webapp based on WebOb, Jinja2, WSGI with a simple", "resource) else: file_path = os.path.join(self.current_dir, self.static_root, self.request.path_info[1:]) print(\"File path:\", file_path)", "self.static_root, self.request.path_info[1:]) print(\"File path:\", file_path) # Try to open the", "return an error to the client. try: file_type = mimetypes.guess_type(file_path)[0]", "the resource parameter or the path in the request. if", "\"\"\"Renders Jinja2 templates into HTML\"\"\" # Find the template and", "the template and render it to HTML # then write", "template and render it to HTML # then write it", "Environment(autoescape=True, loader=FileSystemLoader(self.template_path)) # Figure out what directory the server is", "listen on. # And start the server. server = WSGIServer(('',", "server.\"\"\" def __init__(self): \"\"\"Initializes the class and configures the paths", "read the request and write the response. self.request = Request(environ)", "This is how it finds the templates. self.template_path = os.path.join(os.path.dirname(__file__),", "and Response objects for # used to read the request", "If we can't find the file then return an error", "it to the client. template = self.env.get_template(template_name) html = template.render(template_values)", "parts: cookiehash.update(part.encode()) return cookiehash.hexdigest() def run(self, port): \"\"\"Starts the HTTP", "404 self.response.write(str(e)) def render_template(self, template_name, template_values={}): \"\"\"Renders Jinja2 templates into", "write it to the response object to send it to", "'rb').read() self.response.body_file.write(data) except Exception as e: self.response.status = 404 self.response.write(str(e))", "open(file_path, 'rb').read() self.response.body_file.write(data) except Exception as e: self.response.status = 404", "# Try to open the file. If we can then", "# And start the server. 
server = WSGIServer(('', 8000), WSGIRequestHandler)", "using either the resource parameter or the path in the", "cookiehash.update(part.encode()) return cookiehash.hexdigest() def run(self, port): \"\"\"Starts the HTTP server", "import Environment, FileSystemLoader class MicroServer(object): \"\"\"Small web server.\"\"\" def __init__(self):", "resource=''): \"\"\"Handles request for static pages. It is the default", "content to the response object to send it to the", "error to the client. try: file_type = mimetypes.guess_type(file_path)[0] self.response.content_type =", "hash from strings based on a passphrase.\"\"\" cookiehash = hmac.new(passphrase.encode(),", "self.templates_root = 'templates' if self.routes is None: self.routes = {}", "from strings based on a passphrase.\"\"\" cookiehash = hmac.new(passphrase.encode(), digestmod=hashlib.sha1)", "else: self.static() return self.response(environ, start_response) def static(self, resource=''): \"\"\"Handles request", "we can then guess its type and write its #", "type and write its # content to the response object", "def get_signature(self, passphrase, *parts): \"\"\"Creates a hash from strings based", "resource parameter or the path in the request. if resource:", "Request(environ) self.response = Response() # Find a handler for the", "try: file_type = mimetypes.guess_type(file_path)[0] self.response.content_type = file_type data = open(file_path,", "call the static handler. if handler: handler() else: self.static() return", "file_path) # Try to open the file. If we can", "finds the templates. self.template_path = os.path.join(os.path.dirname(__file__), self.templates_root) self.env = Environment(autoescape=True,", "write its # content to the response object to send", "If not call the static handler. if handler: handler() else:", "later to find the site's resources. self.current_dir = os.path.dirname(os.path.realpath(__file__)) def", "Exception as e: self.response.status = 404 self.response.write(str(e)) def render_template(self, template_name,", "not call the static handler. if handler: handler() else: self.static()", "as save the path. # The path will be used", "WebOb Request and Response objects for # used to read", "Set the port it should listen on. # And start", "from wsgiref.simple_server import WSGIServer, WSGIRequestHandler from webob import Request from", "if self.static_root is None: self.static_root = 'static' if self.templates_root is", "path:\", file_path) # Try to open the file. If we", "it should listen on. # And start the server. server", "so it can find and render pages.\"\"\" if self.static_root is", "and environment for Jinja. This is how it finds the", "there is call it. If not call the static handler.", "file then return an error to the client. try: file_type", "self.templates_root) self.env = Environment(autoescape=True, loader=FileSystemLoader(self.template_path)) # Figure out what directory", "if there is one. handler = self.routes.get(self.request.path_info) # If there", "None: self.routes = {} # Set up the paths and", "is running it as save the path. # The path", "__init__(self): \"\"\"Initializes the class and configures the paths and the", "# then write it to the response object to send", "one. 
handler = self.routes.get(self.request.path_info) # If there is call it.", "web server.\"\"\" def __init__(self): \"\"\"Initializes the class and configures the", "self.response.body_file.write(data) except Exception as e: self.response.status = 404 self.response.write(str(e)) def", "up the paths and environment for Jinja. This is how", "the server. server = WSGIServer(('', 8000), WSGIRequestHandler) server.set_app(self) print(\"Serving on", "if handler: handler() else: self.static() return self.response(environ, start_response) def static(self,", "the HTTP server and tells it what port to listen", "# Create the WebOb Request and Response objects for #", "to send it to the client. # If we can't", "= self.env.get_template(template_name) html = template.render(template_values) self.response.write(html) def get_signature(self, passphrase, *parts):", "import Response from jinja2 import Environment, FileSystemLoader class MicroServer(object): \"\"\"Small", "jinja2 import Environment, FileSystemLoader class MicroServer(object): \"\"\"Small web server.\"\"\" def", "Jinja2 environment so it can find and render pages.\"\"\" if", "to the response object to send it to the client.", "WebOb, Jinja2, WSGI with a simple router \"\"\" import os", "listen on\"\"\" # Create the WSGI HTTP server. Set the", "path will be used later to find the site's resources.", "from webob import Response from jinja2 import Environment, FileSystemLoader class", "response. self.request = Request(environ) self.response = Response() # Find a", "HTML # then write it to the response object to", "self.response.write(html) def get_signature(self, passphrase, *parts): \"\"\"Creates a hash from strings", "it can find and render pages.\"\"\" if self.static_root is None:", "the paths and environment for Jinja. This is how it", "we can't find the file then return an error to", "wsgiref.simple_server import WSGIServer, WSGIRequestHandler from webob import Request from webob", "import WSGIServer, WSGIRequestHandler from webob import Request from webob import", "HTTPServer when there is a request to be handled.\"\"\" #", "find and render pages.\"\"\" if self.static_root is None: self.static_root =", "a request to be handled.\"\"\" # Create the WebOb Request", "file path using either the resource parameter or the path", "the static handler. if handler: handler() else: self.static() return self.response(environ,", "__call__(self, environ, start_response): \"\"\"This method is called by the HTTPServer", "into HTML\"\"\" # Find the template and render it to", "the Jinja2 environment so it can find and render pages.\"\"\"", "# Build a file path using either the resource parameter", "client. # If we can't find the file then return", "the path if there is one. handler = self.routes.get(self.request.path_info) #", "it finds the templates. self.template_path = os.path.join(os.path.dirname(__file__), self.templates_root) self.env =", "router \"\"\" import os import hmac import hashlib import mimetypes", "used to read the request and write the response. self.request", "to the client. # If we can't find the file", "is one. handler = self.routes.get(self.request.path_info) # If there is call", "template.render(template_values) self.response.write(html) def get_signature(self, passphrase, *parts): \"\"\"Creates a hash from", "can then guess its type and write its # content", "None: self.templates_root = 'templates' if self.routes is None: self.routes =", "cookiehash.hexdigest() def run(self, port): \"\"\"Starts the HTTP server and tells", "for Jinja. 
This is how it finds the templates. self.template_path", "print(\"File path:\", file_path) # Try to open the file. If" ]
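# Hedged usage sketch, not part of the original module: __init__ reads static_root,
# templates_root and routes before assigning them, which suggests subclasses provide
# them as class attributes and register handlers in routes. HelloServer and its route
# are illustrative. Note that run() as written ignores its port argument and always
# binds to port 8000.
class HelloServer(MicroServer):
    static_root = 'static'
    templates_root = 'templates'
    routes = None

    def __init__(self):
        super(HelloServer, self).__init__()
        self.routes['/hello'] = self.hello

    def hello(self):
        self.response.write('Hello, world!')


if __name__ == '__main__':
    HelloServer().run(8000)
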
[ "= set(UserProfile.objects.values_list('email', flat=True) # whoa .filter(addons__versions__files__jetpack_version__isnull=False)) log.info('There are %d emails", "release candidate build - https://ftp.mozilla.org/pub/mozilla.org/labs/jetpack/addon-sdk-1.0rc2.tar.gz, https://ftp.mozilla.org/pub/mozilla.org/labs/jetpack/addon-sdk-1.0rc2.zip - and update your", "upgrade any SDK-based add-ons we find that are using verions", "add-ons we find that are using verions 1.0RC2 or greater", "SDK below 1.0RC2 will not be auto-updated and you will", "our open source community! Sincerely, The Mozilla Add-ons Team \"\"\"", "SDK's development. Feedback and engagement from developers like you are", "of the Add-on SDK only a week away, we wanted", "you for participating in the early stages of the Add-on", "participating in the early stages of the Add-on SDK's development.", "def handle(self, *args, **options): sendmail() def sendmail(): addrs = set(UserProfile.objects.values_list('email',", "e)) SUBJECT = 'Instructions for Automatic Upgrade to Add-on SDK", "1.0 final version of the SDK, we would ask that", "%s' % (count, addr)) except Exception, e: log.info('%s. FAIL: %s", "are using verions 1.0RC2 or greater to the 1.0 final", "add-ons with new versions of the Add-on SDK upon release.", "1.0RC2 or greater to the 1.0 final version of the", "using versions of the SDK below 1.0RC2 will not be", "1.0RC2 will not be auto-updated and you will need to", "0 for addr in addrs: count += 1 try: mail.send_mail(SUBJECT,", "%d emails to send.' % len(addrs)) count = 0 for", "1.0 final version of the SDK. Any add-ons we find", "bug 662571\" def handle(self, *args, **options): sendmail() def sendmail(): addrs", "SDK only a week away, we wanted to get in", "*args, **options): sendmail() def sendmail(): addrs = set(UserProfile.objects.values_list('email', flat=True) #", "the SDK, we would ask that you download the latest", "will need to upgrade them to the 1.0 version of", "will be auto-updating add-ons with new versions of the Add-on", "version of the SDK manually. Thank you for participating in", "+= 1 try: mail.send_mail(SUBJECT, MSG, FROM, [addr]) log.info('%s. DONE: %s'", "using verions 1.0RC2 or greater to the 1.0 final version", "with all add-on developers who have existing SDK-based (Jetpack) add-ons.", "are auto-updated with the 1.0 final version of the SDK,", "database and automatically upgrade any SDK-based add-ons we find that", "import settings from django.core.management.base import BaseCommand import amo.utils from users.models", "we find that are using verions 1.0RC2 or greater to", "UserProfile log = logging.getLogger('z.mailer') FROM = settings.DEFAULT_FROM_EMAIL class Command(BaseCommand): help", "the SDK manually. Thank you for participating in the early", "developers like you are the foundations for success in our", "Feedback and engagement from developers like you are the foundations", "final version of the SDK. Any add-ons we find using", "new versions of the Add-on SDK upon release. To ensure", "662571\" def handle(self, *args, **options): sendmail() def sendmail(): addrs =", "FROM = settings.DEFAULT_FROM_EMAIL class Command(BaseCommand): help = \"Send the email", "Hello Mozilla Add-ons Developer! 
With the final version of the", "the 1.0 release, we will scan our add-ons database and", "automatically upgrade any SDK-based add-ons we find that are using", "the latest release candidate build - https://ftp.mozilla.org/pub/mozilla.org/labs/jetpack/addon-sdk-1.0rc2.tar.gz, https://ftp.mozilla.org/pub/mozilla.org/labs/jetpack/addon-sdk-1.0rc2.zip - and", "addrs = set(UserProfile.objects.values_list('email', flat=True) # whoa .filter(addons__versions__files__jetpack_version__isnull=False)) log.info('There are %d", "(Jetpack) add-ons. We would like you to know that going", "to upgrade them to the 1.0 version of the SDK", "the SDK below 1.0RC2 will not be auto-updated and you", "in our open source community! Sincerely, The Mozilla Add-ons Team", "Add-ons Developer! With the final version of the Add-on SDK", "from django.core.management.base import BaseCommand import amo.utils from users.models import UserProfile", "touch with all add-on developers who have existing SDK-based (Jetpack)", "find that are using verions 1.0RC2 or greater to the", "from developers like you are the foundations for success in", "like you are the foundations for success in our open", "only a week away, we wanted to get in touch", "%s (%s)' % (count, addr, e)) SUBJECT = 'Instructions for", "= 'Instructions for Automatic Upgrade to Add-on SDK 1.0' MSG", "'Instructions for Automatic Upgrade to Add-on SDK 1.0' MSG =", "import logging from django.core import mail from django.conf import settings", "the final version of the Add-on SDK only a week", "SDK-based (Jetpack) add-ons. We would like you to know that", "1.0' MSG = \"\"\"\\ Hello Mozilla Add-ons Developer! With the", "on AMO. After the 1.0 release, we will scan our", "with the 1.0 final version of the SDK, we would", "from django.core import mail from django.conf import settings from django.core.management.base", "log.info('%s. FAIL: %s (%s)' % (count, addr, e)) SUBJECT =", "with new versions of the Add-on SDK upon release. To", "add-ons we find using versions of the SDK below 1.0RC2", "sendmail() def sendmail(): addrs = set(UserProfile.objects.values_list('email', flat=True) # whoa .filter(addons__versions__files__jetpack_version__isnull=False))", "you download the latest release candidate build - https://ftp.mozilla.org/pub/mozilla.org/labs/jetpack/addon-sdk-1.0rc2.tar.gz, https://ftp.mozilla.org/pub/mozilla.org/labs/jetpack/addon-sdk-1.0rc2.zip", "wanted to get in touch with all add-on developers who", "help = \"Send the email for bug 662571\" def handle(self,", "handle(self, *args, **options): sendmail() def sendmail(): addrs = set(UserProfile.objects.values_list('email', flat=True)", "% (count, addr, e)) SUBJECT = 'Instructions for Automatic Upgrade", "Add-on SDK only a week away, we wanted to get", "except Exception, e: log.info('%s. FAIL: %s (%s)' % (count, addr,", "import amo.utils from users.models import UserProfile log = logging.getLogger('z.mailer') FROM", "in touch with all add-on developers who have existing SDK-based", "addr, e)) SUBJECT = 'Instructions for Automatic Upgrade to Add-on", "greater to the 1.0 final version of the SDK. Any", "to the 1.0 final version of the SDK. Any add-ons", "all add-on developers who have existing SDK-based (Jetpack) add-ons. We", "existing SDK-based (Jetpack) add-ons. We would like you to know", "for success in our open source community! Sincerely, The Mozilla", "versions of the Add-on SDK upon release. To ensure that", "log.info('%s. DONE: %s' % (count, addr)) except Exception, e: log.info('%s.", "Developer! 
With the final version of the Add-on SDK only", "like you to know that going forward AMO will be", "the 1.0 version of the SDK manually. Thank you for", "that are using verions 1.0RC2 or greater to the 1.0", "foundations for success in our open source community! Sincerely, The", "final version of the SDK, we would ask that you", "FAIL: %s (%s)' % (count, addr, e)) SUBJECT = 'Instructions", "the Add-on SDK's development. Feedback and engagement from developers like", "Automatic Upgrade to Add-on SDK 1.0' MSG = \"\"\"\\ Hello", "development. Feedback and engagement from developers like you are the", "the Add-on SDK upon release. To ensure that your add-on(s)", "to the 1.0 version of the SDK manually. Thank you", "def sendmail(): addrs = set(UserProfile.objects.values_list('email', flat=True) # whoa .filter(addons__versions__files__jetpack_version__isnull=False)) log.info('There", "= 0 for addr in addrs: count += 1 try:", "we wanted to get in touch with all add-on developers", "we find using versions of the SDK below 1.0RC2 will", "count += 1 try: mail.send_mail(SUBJECT, MSG, FROM, [addr]) log.info('%s. DONE:", "SDK. Any add-ons we find using versions of the SDK", "import BaseCommand import amo.utils from users.models import UserProfile log =", "DONE: %s' % (count, addr)) except Exception, e: log.info('%s. FAIL:", "Upgrade to Add-on SDK 1.0' MSG = \"\"\"\\ Hello Mozilla", "know that going forward AMO will be auto-updating add-ons with", "logging.getLogger('z.mailer') FROM = settings.DEFAULT_FROM_EMAIL class Command(BaseCommand): help = \"Send the", "in addrs: count += 1 try: mail.send_mail(SUBJECT, MSG, FROM, [addr])", "AMO will be auto-updating add-ons with new versions of the", "would like you to know that going forward AMO will", "- and update your add-on(s) on AMO. After the 1.0", "versions of the SDK below 1.0RC2 will not be auto-updated", "add-ons. We would like you to know that going forward", ".filter(addons__versions__files__jetpack_version__isnull=False)) log.info('There are %d emails to send.' % len(addrs)) count", "Add-on SDK 1.0' MSG = \"\"\"\\ Hello Mozilla Add-ons Developer!", "try: mail.send_mail(SUBJECT, MSG, FROM, [addr]) log.info('%s. DONE: %s' % (count,", "email for bug 662571\" def handle(self, *args, **options): sendmail() def", "# whoa .filter(addons__versions__files__jetpack_version__isnull=False)) log.info('There are %d emails to send.' %", "= logging.getLogger('z.mailer') FROM = settings.DEFAULT_FROM_EMAIL class Command(BaseCommand): help = \"Send", "that your add-on(s) are auto-updated with the 1.0 final version", "that going forward AMO will be auto-updating add-ons with new", "download the latest release candidate build - https://ftp.mozilla.org/pub/mozilla.org/labs/jetpack/addon-sdk-1.0rc2.tar.gz, https://ftp.mozilla.org/pub/mozilla.org/labs/jetpack/addon-sdk-1.0rc2.zip -", "upon release. To ensure that your add-on(s) are auto-updated with", "% (count, addr)) except Exception, e: log.info('%s. FAIL: %s (%s)'", "of the Add-on SDK's development. Feedback and engagement from developers", "a week away, we wanted to get in touch with", "your add-on(s) are auto-updated with the 1.0 final version of", "Exception, e: log.info('%s. FAIL: %s (%s)' % (count, addr, e))", "To ensure that your add-on(s) are auto-updated with the 1.0", "https://ftp.mozilla.org/pub/mozilla.org/labs/jetpack/addon-sdk-1.0rc2.zip - and update your add-on(s) on AMO. 
After the", "1.0 release, we will scan our add-ons database and automatically", "would ask that you download the latest release candidate build", "upgrade them to the 1.0 version of the SDK manually.", "settings.DEFAULT_FROM_EMAIL class Command(BaseCommand): help = \"Send the email for bug", "scan our add-ons database and automatically upgrade any SDK-based add-ons", "stages of the Add-on SDK's development. Feedback and engagement from", "SDK 1.0' MSG = \"\"\"\\ Hello Mozilla Add-ons Developer! With", "(%s)' % (count, addr, e)) SUBJECT = 'Instructions for Automatic", "(count, addr, e)) SUBJECT = 'Instructions for Automatic Upgrade to", "add-on developers who have existing SDK-based (Jetpack) add-ons. We would", "SDK, we would ask that you download the latest release", "= settings.DEFAULT_FROM_EMAIL class Command(BaseCommand): help = \"Send the email for", "count = 0 for addr in addrs: count += 1", "success in our open source community! Sincerely, The Mozilla Add-ons", "import mail from django.conf import settings from django.core.management.base import BaseCommand", "from django.conf import settings from django.core.management.base import BaseCommand import amo.utils", "- https://ftp.mozilla.org/pub/mozilla.org/labs/jetpack/addon-sdk-1.0rc2.tar.gz, https://ftp.mozilla.org/pub/mozilla.org/labs/jetpack/addon-sdk-1.0rc2.zip - and update your add-on(s) on AMO.", "the foundations for success in our open source community! Sincerely,", "final version of the Add-on SDK only a week away,", "not be auto-updated and you will need to upgrade them", "auto-updated and you will need to upgrade them to the", "After the 1.0 release, we will scan our add-ons database", "Add-on SDK upon release. To ensure that your add-on(s) are", "early stages of the Add-on SDK's development. Feedback and engagement", "send.' % len(addrs)) count = 0 for addr in addrs:", "mail from django.conf import settings from django.core.management.base import BaseCommand import", "Any add-ons we find using versions of the SDK below", "the Add-on SDK only a week away, we wanted to", "will not be auto-updated and you will need to upgrade", "will scan our add-ons database and automatically upgrade any SDK-based", "of the Add-on SDK upon release. To ensure that your", "and engagement from developers like you are the foundations for", "are %d emails to send.' % len(addrs)) count = 0", "e: log.info('%s. FAIL: %s (%s)' % (count, addr, e)) SUBJECT", "release, we will scan our add-ons database and automatically upgrade", "are the foundations for success in our open source community!", "We would like you to know that going forward AMO", "class Command(BaseCommand): help = \"Send the email for bug 662571\"", "auto-updating add-ons with new versions of the Add-on SDK upon", "FROM, [addr]) log.info('%s. DONE: %s' % (count, addr)) except Exception,", "the 1.0 final version of the SDK. Any add-ons we", "django.core.management.base import BaseCommand import amo.utils from users.models import UserProfile log", "below 1.0RC2 will not be auto-updated and you will need", "settings from django.core.management.base import BaseCommand import amo.utils from users.models import", "set(UserProfile.objects.values_list('email', flat=True) # whoa .filter(addons__versions__files__jetpack_version__isnull=False)) log.info('There are %d emails to", "to send.' % len(addrs)) count = 0 for addr in", "= \"\"\"\\ Hello Mozilla Add-ons Developer! 
With the final version", "django.core import mail from django.conf import settings from django.core.management.base import", "to get in touch with all add-on developers who have", "\"Send the email for bug 662571\" def handle(self, *args, **options):", "to Add-on SDK 1.0' MSG = \"\"\"\\ Hello Mozilla Add-ons", "build - https://ftp.mozilla.org/pub/mozilla.org/labs/jetpack/addon-sdk-1.0rc2.tar.gz, https://ftp.mozilla.org/pub/mozilla.org/labs/jetpack/addon-sdk-1.0rc2.zip - and update your add-on(s) on", "https://ftp.mozilla.org/pub/mozilla.org/labs/jetpack/addon-sdk-1.0rc2.tar.gz, https://ftp.mozilla.org/pub/mozilla.org/labs/jetpack/addon-sdk-1.0rc2.zip - and update your add-on(s) on AMO. After", "add-ons database and automatically upgrade any SDK-based add-ons we find", "them to the 1.0 version of the SDK manually. Thank", "1.0 version of the SDK manually. Thank you for participating", "logging from django.core import mail from django.conf import settings from", "the 1.0 final version of the SDK, we would ask", "whoa .filter(addons__versions__files__jetpack_version__isnull=False)) log.info('There are %d emails to send.' % len(addrs))", "% len(addrs)) count = 0 for addr in addrs: count", "\"\"\"\\ Hello Mozilla Add-ons Developer! With the final version of", "SDK upon release. To ensure that your add-on(s) are auto-updated", "ensure that your add-on(s) are auto-updated with the 1.0 final", "and you will need to upgrade them to the 1.0", "[addr]) log.info('%s. DONE: %s' % (count, addr)) except Exception, e:", "MSG = \"\"\"\\ Hello Mozilla Add-ons Developer! With the final", "MSG, FROM, [addr]) log.info('%s. DONE: %s' % (count, addr)) except", "Command(BaseCommand): help = \"Send the email for bug 662571\" def", "BaseCommand import amo.utils from users.models import UserProfile log = logging.getLogger('z.mailer')", "our add-ons database and automatically upgrade any SDK-based add-ons we", "the SDK. Any add-ons we find using versions of the", "django.conf import settings from django.core.management.base import BaseCommand import amo.utils from", "latest release candidate build - https://ftp.mozilla.org/pub/mozilla.org/labs/jetpack/addon-sdk-1.0rc2.tar.gz, https://ftp.mozilla.org/pub/mozilla.org/labs/jetpack/addon-sdk-1.0rc2.zip - and update", "add-on(s) on AMO. After the 1.0 release, we will scan", "addr in addrs: count += 1 try: mail.send_mail(SUBJECT, MSG, FROM,", "log.info('There are %d emails to send.' % len(addrs)) count =", "your add-on(s) on AMO. After the 1.0 release, we will", "manually. Thank you for participating in the early stages of", "have existing SDK-based (Jetpack) add-ons. We would like you to", "forward AMO will be auto-updating add-ons with new versions of", "you to know that going forward AMO will be auto-updating", "version of the SDK, we would ask that you download", "Add-on SDK's development. Feedback and engagement from developers like you", "update your add-on(s) on AMO. After the 1.0 release, we", "version of the SDK. 
Any add-ons we find using versions", "of the SDK below 1.0RC2 will not be auto-updated and", "sendmail(): addrs = set(UserProfile.objects.values_list('email', flat=True) # whoa .filter(addons__versions__files__jetpack_version__isnull=False)) log.info('There are", "you will need to upgrade them to the 1.0 version", "you are the foundations for success in our open source", "candidate build - https://ftp.mozilla.org/pub/mozilla.org/labs/jetpack/addon-sdk-1.0rc2.tar.gz, https://ftp.mozilla.org/pub/mozilla.org/labs/jetpack/addon-sdk-1.0rc2.zip - and update your add-on(s)", "Thank you for participating in the early stages of the", "for participating in the early stages of the Add-on SDK's", "away, we wanted to get in touch with all add-on", "SDK manually. Thank you for participating in the early stages", "With the final version of the Add-on SDK only a", "flat=True) # whoa .filter(addons__versions__files__jetpack_version__isnull=False)) log.info('There are %d emails to send.'", "(count, addr)) except Exception, e: log.info('%s. FAIL: %s (%s)' %", "in the early stages of the Add-on SDK's development. Feedback", "for Automatic Upgrade to Add-on SDK 1.0' MSG = \"\"\"\\", "or greater to the 1.0 final version of the SDK.", "SUBJECT = 'Instructions for Automatic Upgrade to Add-on SDK 1.0'", "find using versions of the SDK below 1.0RC2 will not", "need to upgrade them to the 1.0 version of the", "of the SDK manually. Thank you for participating in the", "for addr in addrs: count += 1 try: mail.send_mail(SUBJECT, MSG,", "add-on(s) are auto-updated with the 1.0 final version of the", "SDK-based add-ons we find that are using verions 1.0RC2 or", "users.models import UserProfile log = logging.getLogger('z.mailer') FROM = settings.DEFAULT_FROM_EMAIL class", "addrs: count += 1 try: mail.send_mail(SUBJECT, MSG, FROM, [addr]) log.info('%s.", "amo.utils from users.models import UserProfile log = logging.getLogger('z.mailer') FROM =", "be auto-updating add-ons with new versions of the Add-on SDK", "AMO. After the 1.0 release, we will scan our add-ons", "and automatically upgrade any SDK-based add-ons we find that are", "week away, we wanted to get in touch with all", "the email for bug 662571\" def handle(self, *args, **options): sendmail()", "developers who have existing SDK-based (Jetpack) add-ons. We would like", "the early stages of the Add-on SDK's development. Feedback and", "mail.send_mail(SUBJECT, MSG, FROM, [addr]) log.info('%s. DONE: %s' % (count, addr))", "get in touch with all add-on developers who have existing", "ask that you download the latest release candidate build -", "**options): sendmail() def sendmail(): addrs = set(UserProfile.objects.values_list('email', flat=True) # whoa", "Mozilla Add-ons Developer! With the final version of the Add-on", "any SDK-based add-ons we find that are using verions 1.0RC2", "log = logging.getLogger('z.mailer') FROM = settings.DEFAULT_FROM_EMAIL class Command(BaseCommand): help =", "we would ask that you download the latest release candidate", "verions 1.0RC2 or greater to the 1.0 final version of", "engagement from developers like you are the foundations for success", "auto-updated with the 1.0 final version of the SDK, we", "version of the Add-on SDK only a week away, we", "who have existing SDK-based (Jetpack) add-ons. We would like you", "going forward AMO will be auto-updating add-ons with new versions", "of the SDK, we would ask that you download the", "of the SDK. 
Any add-ons we find using versions of", "to know that going forward AMO will be auto-updating add-ons", "from users.models import UserProfile log = logging.getLogger('z.mailer') FROM = settings.DEFAULT_FROM_EMAIL", "len(addrs)) count = 0 for addr in addrs: count +=", "addr)) except Exception, e: log.info('%s. FAIL: %s (%s)' % (count,", "that you download the latest release candidate build - https://ftp.mozilla.org/pub/mozilla.org/labs/jetpack/addon-sdk-1.0rc2.tar.gz,", "release. To ensure that your add-on(s) are auto-updated with the", "import UserProfile log = logging.getLogger('z.mailer') FROM = settings.DEFAULT_FROM_EMAIL class Command(BaseCommand):", "be auto-updated and you will need to upgrade them to", "we will scan our add-ons database and automatically upgrade any", "for bug 662571\" def handle(self, *args, **options): sendmail() def sendmail():", "1 try: mail.send_mail(SUBJECT, MSG, FROM, [addr]) log.info('%s. DONE: %s' %", "= \"Send the email for bug 662571\" def handle(self, *args,", "and update your add-on(s) on AMO. After the 1.0 release,", "emails to send.' % len(addrs)) count = 0 for addr" ]
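This second list shreds a one-off Django management command (the "bug 662571" help string points at the AMO/zamboni codebase) that emailed every developer with an SDK-based add-on, together with the body of that upgrade-instructions email. The original fragments show Python 2 syntax (`except Exception, e:`); below is a hedged Python 3 sketch of the same pattern, with the recipient model, queryset filter, and elided message body treated as placeholders rather than the real AMO schema.

```python
import logging

from django.conf import settings
from django.core import mail
from django.core.management.base import BaseCommand

log = logging.getLogger('z.mailer')

SUBJECT = 'Instructions for Automatic Upgrade to Add-on SDK 1.0'
MSG = """Hello Mozilla Add-ons Developer!
... (upgrade instructions as recovered from the fragments; elided here) ...
"""


class Command(BaseCommand):
    help = "Send the email for bug 662571"

    def handle(self, *args, **options):
        sendmail()


def sendmail():
    # Placeholder queryset: the real command selected UserProfile rows whose
    # add-ons have files with a non-null jetpack_version (assumed app layout).
    from users.models import UserProfile
    addrs = set(UserProfile.objects
                .filter(addons__versions__files__jetpack_version__isnull=False)
                .values_list('email', flat=True))
    log.info('There are %d emails to send.', len(addrs))
    for count, addr in enumerate(addrs, 1):
        try:
            mail.send_mail(SUBJECT, MSG, settings.DEFAULT_FROM_EMAIL, [addr])
            log.info('%s. DONE: %s', count, addr)
        except Exception as e:
            log.info('%s. FAIL: %s (%s)', count, addr, e)
```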
[ "the first index and times along the second. \"\"\" #", "altitude = observer._altaz_cache[aakey]['altitude'] return altitude def compute_constraint(self, times, observer, targets):", "in constraints] constraint_arr = np.logical_and.reduce(applied_constraints) return np.any(constraint_arr, axis=1) def is_event_observable(constraints,", "compute the phase. For example, the system could be an", "max is None: self.max = 90*u.deg else: self.max = max", "contains the times for the alt/az computations, (2) 'altaz' contains", "that can occur when the Sun is below the horizon", "2). Returns ------- event_observable : `~numpy.ndarray` Array of booleans of", ">>> airmasses = np.array([1, 1.5, 2, 3, 0]) >>> min_best_rescale(airmasses,", "be disregarded return min_best_rescale(secz, mi, mx, less_than_min=0) class AtNightConstraint(Constraint): \"\"\"", "range, the rescale should return 0 below 35 and 1", "best (1) and worst (2.25). All values outside the range", "]) \"\"\" rescaled = (vals - max_val) / (min_val -", "raise NotImplementedError class AltitudeConstraint(Constraint): \"\"\" Constrain the altitude of the", "ValueError(\"No max and/or min specified in \" \"MoonSeparationConstraint.\") return mask", "1) greater_than_max : 0 or 1 what is returned for", "np.newaxis] else: targets = targets[..., np.newaxis] times, targets = observer._preprocess_inputs(times,", "as_strided(x, shape=shp1, strides=[0] * len(shp1)) b = as_strided(x, shape=shp2, strides=[0]", "which to test the constraint. targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`}", "to times in some range of phases for a periodic", "use get_body rather than get sun here, since # it", "ARE SET? time_range : `~astropy.time.Time` (length = 2) Lower and", "raise ValueError(\"No max and/or min specified in \" \"SunSeparationConstraint.\") return", "= np.logical_and.reduce(applied_constraints) return np.all(constraint_arr, axis=1) def is_observable(constraints, observer, targets, times=None,", "with information about the target and it's observability: ``'target name'``,", ": `~astropy.time.Time` Array of times on which to test the", "time between civil twilights (-6 degrees). \"\"\" return cls(max_solar_altitude=-6*u.deg, **kwargs)", "import (absolute_import, division, print_function, unicode_literals) # Standard library from abc", "no limit. \"\"\" return cls(min, max, **kwargs) def compute_constraint(self, times,", "\" \"MoonSeparationConstraint.\") return mask class MoonIlluminationConstraint(Constraint): \"\"\" Constrain the fractional", ": {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target or list of targets Returns", "---------- min : float or `None` (optional) Minimum acceptable fractional", "cached_moon['illum'] if self.min is None and self.max is not None:", "result on the ``observer`` object. Parameters ---------- times : `~astropy.time.Time`", "solar_separation elif self.max is None and self.min is not None:", "indicates no limit. Examples -------- Constrain the observations to targets", "class PrimaryEclipseConstraint(Constraint): \"\"\" Constrain observations to times during primary eclipse.", "observer, target, times=None, times_ingress_egress=None): \"\"\" Determines if the ``target`` is", "`~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target times : `~astropy.time.Time` (optional) Array of mid-event", "observable for a specific ``observer``, given the supplied ``constraints``. 
Parameters", "/ (max_val - min_val) below = vals < min_val above", "# Package from .moon import moon_illumination from .utils import time_grid_from_range", ">>> min_best_rescale(airmasses, 1, 2.25, less_than_min = 0) # doctest: +FLOAT_CMP", "use the one set with ``astropy.coordinates.solar_system_ephemeris.set`` (which is set to", "\"\"\" Constrain observations to times during secondary eclipse. \"\"\" def", "0 moon_up_mask = moon_alt >= 0 illumination = cached_moon['illum'] if", "return cls(min, max, **kwargs) def compute_constraint(self, times, observer, targets): #", "altitude calculations. This avoids errors in the altitude of the", "class GalacticLatitudeConstraint(Constraint): \"\"\" Constrain the distance between the Galactic plane", "observable at each time in ``times``, given constraints in ``constraints``", "isinstance(self.max, datetime.time): raise TypeError(\"Time limits must be specified as datetime.time", "def observability_table(constraints, observer, targets, times=None, time_range=None, time_grid_resolution=0.5*u.hour): \"\"\" Creates a", "0.2, 0. , 0. ]) \"\"\" rescaled = (vals -", "float or `None` (optional) Minimum acceptable fractional illumination (inclusive). `None`", "PhaseConstraint(binary, min=0.6, max=0.2) \"\"\" self.periodic_event = periodic_event if (min <", "spaced between the two times in ``time_range`` with grid spacing", "@u.quantity_input(horizon=u.deg) def __init__(self, max_solar_altitude=0*u.deg, force_pressure_zero=True): \"\"\" Parameters ---------- max_solar_altitude :", "TypeError(\"Time limits must be specified as \" \"astropy.time.Time objects.\") if", "2D with targets along the first index and times along", "use. If not given, use the one set with ``astropy.coordinates.solar_system_ephemeris.set``", "'__len__'): constraints = [constraints] applied_constraints = [constraint(observer, targets, times=times, time_range=time_range,", "> self.min, (phase >= self.min) & (phase <= self.max), (phase", "the Earth's moon. Constraint is also satisfied if the Moon", "= np.array([(t.time() >= min_time) or (t.time() <= max_time) for t", "of float or bool The constraints, with targets along the", "cls(max_solar_altitude=-12*u.deg, **kwargs) @classmethod def twilight_astronomical(cls, **kwargs): \"\"\" Consider nighttime as", "example, the system could be an eclipsing or non-eclipsing binary,", "Array of booleans of same length as ``times`` for whether", "0.0 self.max = max if max is not None else", "``targets`` are observable during ``time_range`` given constraints in ``constraints_list`` for", "= (times.jd,) # make hashable thing from targets coords try:", "to test the constraints times_ingress_egress : `~astropy.time.Time` (optional) Array of", "``vals`` below ``min_val``. 
(in some cases anything less than ``min_val``", "__init__(self, max_solar_altitude=0*u.deg, force_pressure_zero=True): \"\"\" Parameters ---------- max_solar_altitude : `~astropy.units.Quantity` The", "\"\"\" return cls(min, max, **kwargs) def compute_constraint(self, times, observer, targets):", "self.min is not None: if not isinstance(self.min, datetime.time): raise TypeError(\"Time", "(between zero and one), where the ``min_val`` goes to one,", "time observable'] * 24*u.hour tab.meta['times'] = times.datetime tab.meta['observer'] = observer", "is not None: mask = self.max >= separation elif self.max", "times, observer=None, targets=None): phase = self.periodic_event.phase(times) mask = np.where(self.max >", "targets): cached_altaz = _get_altaz(times, observer, targets) secz = cached_altaz['altaz'].secz.value if", "the best (60) going to 1 and worst (35) going", "is the moon up? cached_moon = _get_moon_data(times, observer) moon_alt =", "rescaled = (vals - min_val) / (max_val - min_val) below", "`~astropy.units.Quantity` or `None` (optional) Minimum acceptable Galactic latitude of target", "boolean_constraint=True): if min is None: self.min = -90*u.deg else: self.min", "For example, the system could be an eclipsing or non-eclipsing", "``vals`` equal to ``max_val`` equal 0 and those equal to", "on time sequence, with spacing ``time_resolution``. This will be passed", "hasattr(observer, '_meridian_transit_cache'): observer._meridian_transit_cache = {} # convert times to tuple", "dict Dictionary containing two key-value pairs. (1) 'times' contains the", "time: >>> from astroplan import Observer >>> from astroplan.constraints import", "the coords themselves # prior to evaluating multiple constraints. if", "t1 = Time(\"2016-03-28T12:00:00\") >>> t2 = Time(\"2016-03-30T12:00:00\") >>> constraint =", "or 1 what is returned for ``vals`` below ``min_val``. (in", "phases between 0.6 and 1.2, for example, you should subtract", "\"\"\" Consider nighttime as time between civil twilights (-6 degrees).", "+ '-12-31'] ) def _make_cache_key(times, targets): \"\"\" Make a unique", "a `~astroplan.constraints.MoonIlluminationConstraint` with defaults of a minimum of 0.25 and", "float (optional) Maximum phase (inclusive) on interval [0, 1). Default", "astroplan.constraints import max_best_rescale >>> import numpy as np >>> altitudes", "to mishandle negative Parameters ---------- min : `~astropy.units.Quantity` or `None`", "max_val) below = vals < min_val above = vals >", "-90*u.deg else: self.min = min if max is None: self.max", "(optional) Maximum acceptable separation between moon and target (inclusive). `None`", "must be specified as \" \"astropy.time.Time objects.\") if self.max is", "float or `None` (optional) Maximum acceptable fractional illumination (inclusive). `None`", "= observer._altaz_cache[aakey]['altitude'] return altitude def compute_constraint(self, times, observer, targets): solar_altitude", "\"GalacticLatitudeConstraint.\") return mask class SunSeparationConstraint(Constraint): \"\"\" Constrain the distance between", "Time >>> subaru = Observer.at_site(\"Subaru\") >>> t1 = Time(\"2016-03-28T12:00:00\") >>>", "Minimum phase (inclusive) on interval [0, 1). 
Default is zero.", "= dict(times=meridian_transit_times) return observer._meridian_transit_cache[aakey] @abstractmethod class Constraint(object): \"\"\" Abstract class", "in ``times``, given constraints in ``constraints`` for a particular ``observer``.", "_get_altaz(times, observer, targets) secz = cached_altaz['altaz'].secz.value if self.boolean_constraint: if self.min", "times_ingress_egress[:, 1] applied_constraints_ing = [constraint(observer, target, times=times_ing, grid_times_targets=True) for constraint", "containing the observability information for each of the ``targets``. The", "anything less than ``min_val`` should also return one, in some", "datetime.time(0, 0, 0) if self.max is not None: max_time =", "1). Default is zero. max : float (optional) Maximum phase", "limit. \"\"\" return cls(min, max, **kwargs) def compute_constraint(self, times, observer,", "# treat as a SkyCoord object. Accessing the longitude #", "time_grid_resolution=0.5*u.hour): \"\"\" A function to determine whether ``targets`` are always", "for target, observable in zip(targets, constraint_arr): s = set([t.datetime.month for", "observations to times during primary eclipse. \"\"\" def __init__(self, eclipsing_system):", "= min if min is not None else 0.0 self.max", "the corrections for atmospheric refraction return nonsense values. \"\"\" self.max_solar_altitude", "some targets. \"\"\" def __init__(self, min=None, max=None): \"\"\" Parameters ----------", "# assume targets is a string. targkey = (targets,) except", "on the ``observer`` object. Parameters ---------- times : `~astropy.time.Time` Array", "astropy.time import Time >>> subaru = Observer.at_site(\"Subaru\") >>> t1 =", "one. Examples -------- To constrain observations on orbital phases between", "2D array of float or bool The constraints. If 2D", "\" \"MoonSeparationConstraint.\") return mask class LocalTimeConstraint(Constraint): \"\"\" Constrain the observable", ": bool if True, grids the constraint result with targets", "24*u.hour tab.meta['times'] = times.datetime tab.meta['observer'] = observer tab.meta['constraints'] = constraints", "\"\"\" if not hasattr(observer, '_moon_cache'): observer._moon_cache = {} # convert", "not hasattr(observer, '_meridian_transit_cache'): observer._meridian_transit_cache = {} # convert times to", "max and/or min specified in \" \"SunSeparationConstraint.\") return mask class", "min_time = Time(\"1950-01-01T00:00:00\") if self.min is None else self.min max_time", "This avoids errors in the altitude of the Sun that", "no limit. max : `~astropy.units.Quantity` or `None` (optional) Maximum acceptable", "times, targets = observer._preprocess_inputs(times, targets, grid_times_targets=False) result = self.compute_constraint(times, observer,", "until astropy PR #5897 is released # which should be", "targets along the first index and times along the second.", "np.any(constraint_arr, axis=1) def is_event_observable(constraints, observer, target, times=None, times_ingress_egress=None): \"\"\" Determines", "by the observer. # 'get_sun' returns ICRS coords. sun =", ": `~datetime.time` Latest local time (inclusive). `None` indicates no limit.", "``'constraints'`` (containing the supplied ``constraints``). 
\"\"\" if not hasattr(constraints, '__len__'):", "be specified as \" \"astropy.time.Time objects.\") def compute_constraint(self, times, observer,", "\"TimeConstraint\", \"observability_table\", \"months_observable\", \"max_best_rescale\", \"min_best_rescale\", \"PhaseConstraint\", \"is_event_observable\"] _current_year = time.localtime().tm_year", "outside the range, the rescale should return 0 below 35", "min_val) / (max_val - min_val) below = vals < min_val", "observer.altaz(times, targets, grid_times_targets=False) observer._altaz_cache[aakey] = dict(times=times, altaz=altaz) finally: if force_zero_pressure:", "no limit. ephemeris : str, optional Ephemeris to use. If", "moon up? cached_moon = _get_moon_data(times, observer) moon_alt = cached_moon['altaz'].alt moon_down_mask", "is not None: max_time = self.max else: max_time = datetime.time(23,", "`~astroplan.time_grid_from_range`. time_grid_resolution : `~astropy.units.Quantity` (optional) If ``time_range`` is specified, determine", "\"\"\" def __init__(self, eclipsing_system): \"\"\" Parameters ---------- eclipsing_system : `~astroplan.periodic.EclipsingSystem`", "is the min altitude and 1 is the max. \"\"\"", "set with `~astropy.coordinates.solar_system_ephemeris` (which is set to 'builtin' by default).", "grid_times_targets : bool if True, grids the constraint result with", "astroplan.constraints import min_best_rescale >>> import numpy as np >>> airmasses", "0. , 0. ]) \"\"\" rescaled = (vals - max_val)", "of the Earth's moon. Constraint is also satisfied if the", "solar altitude calculations. This avoids errors in the altitude of", "aakey not in observer._altaz_cache: try: if self.force_pressure_zero: observer_old_pressure = observer.pressure", "time between astronomical twilights (-18 degrees). \"\"\" return cls(max_solar_altitude=-18*u.deg, **kwargs)", "separated by ``time_resolution``. Default is 0.5 hours. Returns ------- observable_months", "vals > max_val rescaled[below] = less_than_min rescaled[above] = 0 return", "as_strided # Package from .moon import moon_illumination from .utils import", "limits. An example use case for this class would be", "**kwargs) @classmethod def twilight_astronomical(cls, **kwargs): \"\"\" Consider nighttime as time", "= cached_altaz['altaz'].secz.value if self.boolean_constraint: if self.min is None and self.max", "= ephemeris @classmethod def dark(cls, min=None, max=0.25, **kwargs): \"\"\" initialize", "= abs(targets.transform_to(Galactic).b) if self.min is None and self.max is not", "= observer.pressure observer.pressure = 0 altaz = observer.moon_altaz(times) illumination =", "zip(targets, constraint_arr): s = set([t.datetime.month for t in times[observable]]) months_observable.append(s)", "argument here, which causes small <1 deg # innacuracies, but", "of the target (inclusive). `None` indicates no limit. boolean_constraint :", "or (max > 1): raise ValueError('The minimum of the PhaseConstraint", "illumination (inclusive). `None` indicates no limit. \"\"\" return cls(min, max,", "if force_zero_pressure: observer.pressure = observer_old_pressure return observer._altaz_cache[aakey] def _get_moon_data(times, observer,", "Constraint is also satisfied if the Moon has set. \"\"\"", "``astropy.coordinates.solar_system_ephemeris.set`` (which is set to 'builtin' by default). 
\"\"\" self.min", "so that ``vals`` equal to ``min_val`` equal 0 and those", "70]) >>> max_best_rescale(altitudes, 35, 60) # doctest: +FLOAT_CMP array([ 0.", "of same length as ``times`` for whether or not the", "store these calculations in a dictionary. Parameters ---------- times :", "default). \"\"\" self.min = min self.max = max self.ephemeris =", "between 23:50 and 04:08 local Hawaiian time >>> constraint =", "shp1, shp2 = times.shape, targets.shape x = np.array([1]) a =", "**kwargs) @classmethod def twilight_nautical(cls, **kwargs): \"\"\" Consider nighttime as time", ": 1D or 2D array of float or bool The", "from abc import ABCMeta, abstractmethod import datetime import time import", "= LocalTimeConstraint(min=dt.time(23,50), max=dt.time(4,8)) \"\"\" self.min = min self.max = max", "worst acceptable value (rescales to 0) max_val : float best", "For values outside the range, the rescale should return 0", "of targets time_range : `~astropy.time.Time` (optional) Lower and upper bounds", "= Time( # needed for backward compatibility [str(_current_year) + '-01-01',", "of times on which to test the constraint. targets :", "hasattr(observer, '_moon_cache'): observer._moon_cache = {} # convert times to tuple", "\"\"\" rescaled = (vals - max_val) / (min_val - max_val)", "rescaled[above] = 0 return rescaled def max_best_rescale(vals, min_val, max_val, greater_than_max=1):", "Parameters ---------- observer : `~astroplan.Observer` the observation location from which", "mask = self.min <= solar_separation elif self.min is not None", "max=None, **kwargs): \"\"\" initialize a `~astroplan.constraints.MoonIlluminationConstraint` with defaults of a", "def __init__(self, periodic_event, min=None, max=None): \"\"\" Parameters ---------- periodic_event :", "by ``time_resolution``. Default is 0.5 hours. Returns ------- observability_table :", "not specified, defaults to current year (localtime) time_grid_resolution : `~astropy.units.Quantity`", "23:50 and 04:08 local time: >>> from astroplan import Observer", "\"\"\" Determines if the ``target`` is observable at each time", "between 0 and 1 inclusive rescaled so that ``vals`` equal", "use as a cache key \"\"\" # make a tuple", "min specified in \" \"MoonSeparationConstraint.\") return mask class MoonIlluminationConstraint(Constraint): \"\"\"", "To create a constraint that requires the airmass be \"better", "\"\"\" Specify and constraints to determine which targets are observable", "min specified in \" \"GalacticLatitudeConstraint.\") return mask class SunSeparationConstraint(Constraint): \"\"\"", "numpy as np >>> airmasses = np.array([1, 1.5, 2, 3,", "[\"AltitudeConstraint\", \"AirmassConstraint\", \"AtNightConstraint\", \"is_observable\", \"is_always_observable\", \"time_grid_from_range\", \"GalacticLatitudeConstraint\", \"SunSeparationConstraint\", \"MoonSeparationConstraint\", \"MoonIlluminationConstraint\",", "= Time(\"1950-01-01T00:00:00\") if self.min is None else self.min max_time =", "`None` (optional) Minimum acceptable Galactic latitude of target (inclusive). `None`", "occur on same day: if min_time < max_time: try: mask", "in the time range given the constraints. \"\"\" if not", "59, 59) # If time limits occur on same day:", "def compute_constraint(self, times, observer=None, targets=None): mask = self.eclipsing_system.in_secondary_eclipse(times) return mask", "`None` indicates no limit. max : `~datetime.time` Latest local time", "values. 
\"\"\" self.max_solar_altitude = max_solar_altitude self.force_pressure_zero = force_pressure_zero @classmethod def", "targets.shape x = np.array([1]) a = as_strided(x, shape=shp1, strides=[0] *", "---------- min : `~astropy.units.Quantity` or `None` (optional) Minimum acceptable separation", "should subtract one from the second number: >>> constraint =", "whether or not each target is ever observable in the", "array-like the values that need to be rescaled to be", "is not None and self.max is not None: mask =", "the upper limit on the airmass (``max``) and not the", "# values below 1 should be disregarded return min_best_rescale(secz, mi,", ": tuple A hashable tuple for use as a cache", "airmass (``max``) and not the lower limit. Parameters ---------- max", "whether constraints are met between test times in ``time_range`` by", "along the second. Otherwise, we rely on broadcasting the shapes", "`~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target or list of targets times : `~astropy.time.Time`", "subtract one from the second number: >>> constraint = PhaseConstraint(binary,", "as ``times`` for whether or not the target is ever", "max=None, min=1, boolean_constraint=True): self.min = min self.max = max self.boolean_constraint", "TimeConstraint(t1,t2) \"\"\" self.min = min self.max = max if self.min", "= np.logical_and.reduce(applied_constraints) else: times_ing = times_ingress_egress[:, 0] times_egr = times_ingress_egress[:,", ": `~astropy.time.Time` (optional) Lower and upper bounds on time sequence", "primary eclipse. \"\"\" def __init__(self, eclipsing_system): \"\"\" Parameters ---------- eclipsing_system", "and some targets. \"\"\" def __init__(self, min=None, max=None): \"\"\" Parameters", "or subclass System on which to compute the phase. For", "`~astroplan.FixedTarget`} Target or list of targets time_range : `~astropy.time.Time` (optional)", "applied_constraints_ing = [constraint(observer, target, times=times_ing, grid_times_targets=True) for constraint in constraints]", "mask class MoonSeparationConstraint(Constraint): \"\"\" Constrain the distance between the Earth's", "limit. ephemeris : str, optional Ephemeris to use. If not", "rely on broadcasting the shapes together using standard numpy rules.", "compute_constraint(self, times, observer, targets): # removed the location argument here,", "the frame of the moon coord # which is GCRS,", "is the max. \"\"\" def __init__(self, min=None, max=None, boolean_constraint=True): if", "# Standard library from abc import ABCMeta, abstractmethod import datetime", "Dictionary containing a key-value pair. 'times' contains the meridian_transit times.", "not None: mask = (self.min <= secz) & (secz <=", "2) Lower and upper bounds on time sequence. time_grid_resolution :", "targets Returns ------- time_dict : dict Dictionary containing a key-value", "`None` indicates no limit. ephemeris : str, optional Ephemeris to", "for solar altitude calculations. This avoids errors in the altitude", "= self.eclipsing_system.in_primary_eclipse(times) return mask class SecondaryEclipseConstraint(Constraint): \"\"\" Constrain observations to", "if max is not None else 1.0 def compute_constraint(self, times,", "time_range : `~astropy.time.Time` (length = 2) Lower and upper bounds", "targets is a string. 
targkey = (targets,) except BaseException: targkey", "eclipsing_system): \"\"\" Parameters ---------- eclipsing_system : `~astroplan.periodic.EclipsingSystem` System which must", "datetime.time objects.\") if self.max is not None: if not isinstance(self.max,", "illumination for an array of times for ``observer``. Cache the", "# broadcasting times v targets is slow due to #", "a 24 hour period centered on that time. time_grid_resolution :", "observer, targets): separation = abs(targets.transform_to(Galactic).b) if self.min is None and", "(phase >= self.min) & (phase <= self.max), (phase >= self.min)", "= {} aakey = _make_cache_key(times, 'sun') if aakey not in", "if self.min is not None: if not isinstance(self.min, datetime.time): raise", "`~astropy.time.Time` (optional) Lower and upper bounds on time sequence If", "specified in \" \"AirmassConstraint.\") return mask else: if self.max is", "PeriodicEvent >>> from astropy.time import Time >>> import astropy.units as", "numpy arrays of the same shape and # broadcast these", "the constraints. times : `~astropy.time.Time` The times to compute the", "indicates no limit. max : `~astropy.time.Time` Latest time (inclusive). `None`", "rescaled to be between 0 and 1 min_val : float", "with defaults of a minimum of 0.25 and a maximum", "note to future editors - the order matters here #", "__init__(self, min=None, max=None): \"\"\" Parameters ---------- min : `~astropy.units.Quantity` or", "compute_constraint(self, times, observer, targets): cached_altaz = _get_altaz(times, observer, targets) secz", "`~astroplan.constraints.MoonIlluminationConstraint` with defaults of a minimum of 0.25 and a", "= observer.moon_altaz(times) illumination = np.array(moon_illumination(times)) observer._moon_cache[aakey] = dict(times=times, illum=illumination, altaz=altaz)", "b).shape if output_shape != np.array(result).shape: result = np.broadcast_to(result, output_shape) return", "# which should be astropy 1.3.2 moon = get_moon(times, ephemeris=self.ephemeris)", "higher than ``max_val`` should also return one, in some cases", "``time_range`` given constraints in the ``constraints_list`` for a particular ``observer``.", "angle. .. note:: The ``max`` and ``min`` arguments appear in", "set. \"\"\" def __init__(self, min=None, max=None, ephemeris=None): \"\"\" Parameters ----------", "= time_grid_from_range(time_range, time_resolution=time_grid_resolution) if is_24hr_table: tab['time observable'] = tab['fraction of", "\"AirmassConstraint\", \"AtNightConstraint\", \"is_observable\", \"is_always_observable\", \"time_grid_from_range\", \"GalacticLatitudeConstraint\", \"SunSeparationConstraint\", \"MoonSeparationConstraint\", \"MoonIlluminationConstraint\", \"LocalTimeConstraint\",", "u >>> binary = PeriodicEvent(epoch=Time('2017-01-01 02:00'), period=1*u.day) >>> constraint =", "for backward compatibility _current_year_time_range = Time( # needed for backward", "the observations to targets that are observable between 2016-03-28 and", ": {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target or list of targets times", "constraints] constraint_arr = np.logical_and.reduce(applied_constraints) return np.all(constraint_arr, axis=1) def is_observable(constraints, observer,", "and (3) contains the moon illumination for those times. \"\"\"", "The ``max`` and ``min`` arguments appear in the order (max,", "between moon and target (inclusive). `None` indicates no limit. 
max", "if not isinstance(self.max, datetime.time): raise TypeError(\"Time limits must be specified", "coord # which is GCRS, and that is what we", "(60) going to 1 and worst (35) going to 0.", "timezone = observer.timezone if self.min is not None: min_time =", ">>> # bound times between 23:50 and 04:08 local Hawaiian", "end user # disables gridding and re-shapes the coords themselves", "return mask class MoonIlluminationConstraint(Constraint): \"\"\" Constrain the fractional illumination of", "= self.periodic_event.phase(times) mask = np.where(self.max > self.min, (phase >= self.min)", "is not None: mask = self.max >= moon_separation elif self.max", "equal to ``max_val`` equal 1 Examples -------- rescale an array", "a score (between zero and one), where the ``max_val`` goes", "be within certain time limits. An example use case for", ".target import get_skycoord __all__ = [\"AltitudeConstraint\", \"AirmassConstraint\", \"AtNightConstraint\", \"is_observable\", \"is_always_observable\",", "def twilight_civil(cls, **kwargs): \"\"\" Consider nighttime as time between civil", "\"\"\"Constrain the observing time to be within certain time limits.", "location=observer.location) solar_separation = sun.separation(targets) if self.min is None and self.max", "min_time) or (times.datetime.time() <= max_time)) return mask class TimeConstraint(Constraint): \"\"\"Constrain", "and re-shapes the coords themselves # prior to evaluating multiple", "is None.\") else: mx = self.max mi = 1 if", "observer._meridian_transit_cache[aakey] @abstractmethod class Constraint(object): \"\"\" Abstract class for objects defining", "`~astroplan.Target` The targets on which to apply the constraints. times", "time import warnings # Third-party from astropy.time import Time import", "no limit. boolean_constraint : bool If True, the constraint is", "Time import astropy.units as u from astropy.coordinates import get_body, get_sun,", "dict(times=times, illum=illumination, altaz=altaz) finally: if force_zero_pressure: observer.pressure = observer_old_pressure return", "cached_altaz['altaz'].alt if self.boolean_constraint: lowermask = self.min <= alt uppermask =", "not isinstance(self.max, Time): raise TypeError(\"Time limits must be specified as", "would result from # broadcasting times and targets against each", "twilight_civil(cls, **kwargs): \"\"\" Consider nighttime as time between civil twilights", "System which must be in secondary eclipse. \"\"\" self.eclipsing_system =", "min : `~astropy.units.Quantity` or `None` (optional) Minimum acceptable Galactic latitude", "a single (scalar) time, the table will be for a", "below the horizon and the corrections for atmospheric refraction return", "approximated by the secant of the zenith angle. .. note::", ">>> from astroplan import Observer >>> from astropy.time import Time", "the secant of the zenith angle. .. note:: The ``max``", "rescaled so that ``vals`` equal to ``min_val`` equal 0 and", "LocalTimeConstraint(min=dt.time(23,50), max=dt.time(4,8)) \"\"\" self.min = min self.max = max if", "targkey def _get_altaz(times, observer, targets, force_zero_pressure=False): \"\"\" Calculate alt/az for", "(min < 0) or (min > 1) or (max <", "given constraints in ``constraints_list`` for a particular ``observer``. 
Parameters ----------", "not given, use the one set with `~astropy.coordinates.solar_system_ephemeris` (which is", "constraints = [constraints] applied_constraints = [constraint(observer, targets, times=times, time_range=time_range, time_grid_resolution=time_grid_resolution,", "alt/az coordinates at those times and (3) contains the moon", "constraints] applied_constraints_egr = [constraint(observer, target, times=times_egr, grid_times_targets=True) for constraint in", ": `~astropy.units.Quantity` (optional) If ``time_range`` is specified, determine whether constraints", "\"\"\" Determines which month the specified ``targets`` are observable for", "above 60. >>> from astroplan.constraints import max_best_rescale >>> import numpy", "from time objects, or from observer if self.min is not", "(phase >= self.min) | (phase <= self.max)) return mask def", "given the constraints. \"\"\" if not hasattr(constraints, '__len__'): constraints =", "the requested ``time_range``, given the constraints in ``constraints_list`` for ``observer``.", "array([ 0. , 0. , 0.2, 0.4, 0.8, 1. ])", "Returns ------- cache_key : tuple A hashable tuple for use", "the ``min_val`` goes to one, and the ``max_val`` goes to", "and self.min is not None: mask = self.min <= separation", "as datetime.time objects.\") def compute_constraint(self, times, observer, targets): timezone =", "def __init__(self, eclipsing_system): \"\"\" Parameters ---------- eclipsing_system : `~astroplan.periodic.EclipsingSystem` System", "or (times.datetime.time() <= max_time)) return mask class TimeConstraint(Constraint): \"\"\"Constrain the", "1))[:, np.newaxis] else: targets = targets[..., np.newaxis] times, targets =", "observer : `~astroplan.Observer` The observer who has constraints ``constraints``. force_zero_pressure", "is treated as a boolean (True for within the limits", "(inclusive). force_pressure_zero : bool (optional) Force the pressure to zero", "else 0.0 self.max = max if max is not None", "self.min else: min_time = self.min = datetime.time(0, 0, 0) if", "particular ``observer``. Parameters ---------- constraints : list or `~astroplan.constraints.Constraint` Observational", "for constraint in constraints] constraint_arr = np.logical_and.reduce(applied_constraints) else: times_ing =", "return observer._meridian_transit_cache[aakey] @abstractmethod class Constraint(object): \"\"\" Abstract class for objects", "Earth's moon and some targets. \"\"\" def __init__(self, min=None, max=None,", "limits must be specified as \" \"astropy.time.Time objects.\") def compute_constraint(self,", ">= min_time) or (t.time() <= max_time) for t in times.datetime])", "the constraint returns a float on [0, 1], where 0", "between 23:50 and 04:08 local time: >>> from astroplan import", "times try: timekey = tuple(times.jd) + times.shape except BaseException: #", "= 0 # find solar altitude at these times altaz", "\"\"\" if not hasattr(observer, '_altaz_cache'): observer._altaz_cache = {} # convert", "& (moon_separation <= self.max)) else: raise ValueError(\"No max and/or min", "**kwargs) def _get_solar_altitudes(self, times, observer, targets): if not hasattr(observer, '_altaz_cache'):", "phase (inclusive) on interval [0, 1). Default is zero. max", "self.min = min self.max = max if self.min is None", "= observer.altaz(times, targets, grid_times_targets=False) observer._altaz_cache[aakey] = dict(times=times, altaz=altaz) finally: if", "(-6 degrees). 
\"\"\" return cls(max_solar_altitude=-6*u.deg, **kwargs) @classmethod def twilight_nautical(cls, **kwargs):", "the horizon and the corrections for atmospheric refraction return nonsense", "those equal to ``min_val`` equal 1 Examples -------- rescale airmasses", "the `~astropy.coordinates.AltAz` frame tends to mishandle negative Parameters ---------- min", "# attribute of the frame data should be unique and", "observer.pressure = observer_old_pressure return observer._moon_cache[aakey] def _get_meridian_transit_times(times, observer, targets): \"\"\"", "True, grids the constraint result with targets along the first", "mask = self.max >= moon_separation elif self.max is None and", "or `None` (optional) Minimum acceptable fractional illumination (inclusive). `None` indicates", "max_time = datetime.time(23, 59, 59) # If time limits occur", "``targets`` for whether or not each target is ever observable", "must be scalar timekey = (times.jd,) # make hashable thing", "self.compute_constraint(times, observer, targets) # make sure the output has the", "not all observing blocks are valid over the time limits", "and a maximum of 0.25 Parameters ---------- min : float", "class MoonSeparationConstraint(Constraint): \"\"\" Constrain the distance between the Earth's moon", "entries ``'times'`` (with an array of all the times), ``'observer'``", "be sped up a lot by dropping to the trigonometric", "is 0.5 hours. Returns ------- ever_observable : list List of", "min_time = self.min else: min_time = self.min = datetime.time(0, 0,", "phases between 0.4 and 0.6, >>> from astroplan import PeriodicEvent", "if min is not None else 0.0 self.max = max", "observable'] target_names = [target.name for target in targets] ever_obs =", "on which to apply the constraints. Returns ------- constraint_result :", "specified as datetime.time objects.\") def compute_constraint(self, times, observer, targets): timezone", "the limits and False for outside). If False, the constraint", "all observing blocks are valid over the time limits used", "= cached_moon['illum'] if self.min is None and self.max is not", "if self.max is None else self.max mask = np.logical_and(times >", "\"\"\" Calculate next meridian transit for an array of times", "observer : `~astroplan.Observer` The observer who has constraints ``constraints`` target", "if aakey not in observer._meridian_transit_cache: meridian_transit_times = observer.target_meridian_transit_time(times, targets) observer._meridian_transit_cache[aakey]", "of sets of unique integers representing each month that a", "is set to 'builtin' by default). \"\"\" self.min = min", "is needed until astropy PR #5897 is released # which", "and self.max is not None: mask = ((self.min <= illumination)", "and 1 is the max. \"\"\" def __init__(self, min=None, max=None,", "max if self.min is None and self.max is None: raise", "def compute_constraint(self, times, observer, targets): with warnings.catch_warnings(): warnings.simplefilter('ignore') min_time =", "constraint time_range : `~astropy.time.Time` (optional) Lower and upper bounds on", "Observer >>> from astroplan.constraints import LocalTimeConstraint >>> import datetime as", "as time between astronomical twilights (-18 degrees). \"\"\" return cls(max_solar_altitude=-18*u.deg,", "= observer_old_pressure else: altitude = observer._altaz_cache[aakey]['altitude'] return altitude def compute_constraint(self,", "what is returned for ``vals`` above ``max_val``. 
(in some cases", "Otherwise, we rely on broadcasting the shapes together using standard", "= max if self.min is None and self.max is None:", "If False, the constraint returns a float on [0, 1],", "a higher altitude than airmass=2:: AirmassConstraint(2) \"\"\" def __init__(self, max=None,", "local Hawaiian time >>> constraint = LocalTimeConstraint(min=dt.time(23,50), max=dt.time(4,8)) \"\"\" self.min", "times for the computations, (2) 'altaz' contains the corresponding alt/az", "constraints times_ingress_egress : `~astropy.time.Time` (optional) Array of ingress and egress", "the ``time_range`` is given as a scalar. It also contains", "is considered to be \"night\" (inclusive). force_pressure_zero : bool (optional)", "equal to ``min_val`` equal 0 and those equal to ``max_val``", "name'``, ``'ever observable'``, ``'always observable'``, and ``'fraction of time observable'``.", "None: if not isinstance(self.max, Time): raise TypeError(\"Time limits must be", "isinstance(self.max, Time): raise TypeError(\"Time limits must be specified as \"", "spacing ``time_resolution`` for ``observer``. Cache the result on the ``observer``", "or bool The constraints, with targets along the first index", "time_resolution=time_grid_resolution) if is_24hr_table: tab['time observable'] = tab['fraction of time observable']", "constraint_result : 1D or 2D array of float or bool", "constraint for this class Parameters ---------- observer : `~astroplan.Observer` the", "array of times for ``targets`` and ``observer``. Cache the result", "\"better than 2\", i.e. at a higher altitude than airmass=2::", "coords. sun = get_body('sun', times, location=observer.location) solar_separation = sun.separation(targets) if", "airmass is approximated by the secant of the zenith angle.", "illumination) & moon_up_mask elif self.min is not None and self.max", "time_grid_resolution=time_grid_resolution, grid_times_targets=True) for constraint in constraints] constraint_arr = np.logical_and.reduce(applied_constraints) return", "greater_than_max : 0 or 1 what is returned for ``vals``", "above ``max_val``. (in some cases anything higher than ``max_val`` should", "times : `~astropy.time.Time` (optional) Array of times on which to", "first argument into `~astroplan.time_grid_from_range`. If a single (scalar) time, the", "objects defining observational constraints. \"\"\" __metaclass__ = ABCMeta def __call__(self,", "observer._altaz_cache: try: if force_zero_pressure: observer_old_pressure = observer.pressure observer.pressure = 0", "class SecondaryEclipseConstraint(Constraint): \"\"\" Constrain observations to times during secondary eclipse.", "corrections for atmospheric refraction return nonsense values. \"\"\" self.max_solar_altitude =", "has constraints ``constraints`` targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target or", "compute_constraint(self, times, observer=None, targets=None): mask = self.eclipsing_system.in_secondary_eclipse(times) return mask class", "targets, force_zero_pressure=False): \"\"\" Calculate alt/az for ``target`` at times linearly", "times : `~astropy.time.Time` The times to compute the constraint. WHAT", "hasattr(targets, 'frame'): # treat as a SkyCoord object. 
Accessing the", "& uppermask else: return max_best_rescale(alt, self.min, self.max) class AirmassConstraint(AltitudeConstraint): \"\"\"", "``min`` arguments appear in the order (max, min) in this", "for constraint in constraints] applied_constraints_egr = [constraint(observer, target, times=times_egr, grid_times_targets=True)", "in constraints] constraint_arr = np.logical_and.reduce(applied_constraints) return np.all(constraint_arr, axis=1) def is_observable(constraints,", "will be passed as the first argument into `~astroplan.time_grid_from_range`. time_grid_resolution", "0.6, 0.2, 0. , 0. ]) \"\"\" rescaled = (vals", "targets) mask = solar_altitude <= self.max_solar_altitude return mask class GalacticLatitudeConstraint(Constraint):", "a table with information about observability for all the ``targets``", "None: self.max = 90*u.deg else: self.max = max self.boolean_constraint =", "`~astroplan.periodic.PeriodicEvent` or subclass System on which to compute the phase.", "time_grid_resolution : `~astropy.units.Quantity` (optional) If ``time_range`` is specified, determine whether", "2, etc. \"\"\" # TODO: This method could be sped", "(separation <= self.max)) else: raise ValueError(\"No max and/or min specified", "contains four columns with information about the target and it's", "import LocalTimeConstraint >>> import datetime as dt >>> subaru =", "= np.array([20, 30, 40, 45, 55, 70]) >>> max_best_rescale(altitudes, 35,", "max_val : float best value cared about (rescales to 1)", "be between 0 and 1, with the best (60) going", "min_val) below = vals < min_val above = vals >", "astropy.time import Time >>> import astropy.units as u >>> binary", "`~astroplan.FixedTarget`} Target or list of targets times : `~astropy.time.Time` (optional)", "with warnings.catch_warnings(): warnings.simplefilter('ignore') min_time = Time(\"1950-01-01T00:00:00\") if self.min is None", "observer. # 'get_sun' returns ICRS coords. sun = get_body('sun', times,", "of times for ``observer``. Cache the result on the ``observer``", "else 1.0 def compute_constraint(self, times, observer=None, targets=None): phase = self.periodic_event.phase(times)", "into `~astroplan.time_grid_from_range`. time_grid_resolution : `~astropy.units.Quantity` (optional) If ``time_range`` is specified,", "altitudes, as the `~astropy.coordinates.AltAz` frame tends to mishandle negative Parameters", "``times`` and ``targets``. Often, we wish to store expensive calculations", "has constraints ``constraints`` target : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target times", ": list or `~astroplan.constraints.Constraint` Observational constraint(s) observer : `~astroplan.Observer` The", "not hasattr(constraints, '__len__'): constraints = [constraints] if times is not", "``constraints``). \"\"\" if not hasattr(constraints, '__len__'): constraints = [constraints] is_24hr_table", "values outside the range should return 0. >>> from astroplan.constraints", "and one), where the ``min_val`` goes to one, and the", "observations to times in some range of phases for a", "boolean_constraint=True): self.min = min self.max = max self.boolean_constraint = boolean_constraint", "table will be for a 24 hour period centered on", ": list List of booleans of same length as ``targets``", "as np >>> altitudes = np.array([20, 30, 40, 45, 55,", "or list of targets. 
Returns ------- cache_key : tuple A", ">= min_time) or (times.datetime.time() <= max_time)) return mask class TimeConstraint(Constraint):", "linearly spaced between the two times in ``time_range`` with grid", "\"\"\" if not hasattr(constraints, '__len__'): constraints = [constraints] is_24hr_table =", "of float or bool The constraints. If 2D with targets", "than accessing the ra attribute. targkey = tuple(targets.frame.data.lon.value.ravel()) + targets.shape", "def __init__(self, min=None, max=None): \"\"\" Parameters ---------- min : `~astropy.time.Time`", "observer, targets) secz = cached_altaz['altaz'].secz.value if self.boolean_constraint: if self.min is", "astropy 1.3.2 moon = get_moon(times, ephemeris=self.ephemeris) # note to future", "a tuple from times try: timekey = tuple(times.jd) + times.shape", "uppermask = alt <= self.max return lowermask & uppermask else:", "max is None.\") else: mx = self.max mi = 1", "less_than_min rescaled[above] = 0 return rescaled def max_best_rescale(vals, min_val, max_val,", "altitude and 1 is the max. \"\"\" def __init__(self, min=None,", "times is None and time_range is not None: times =", "limits used in calls to `is_observable` or `is_always_observable`. \"\"\" def", "and maximum phase must be described on the interval [0,", "that a target is observable, one set per target. These", ": `~astroplan.periodic.PeriodicEvent` or subclass System on which to compute the", "\"\"\" Parameters ---------- min : `~datetime.time` Earliest local time (inclusive).", "for huge speedup if the end user # disables gridding", "the observer. # 'get_sun' returns ICRS coords. sun = get_body('sun',", "Examples -------- To constrain observations on orbital phases between 0.4", "values that need to be rescaled to be between 0", "zenith angle. .. note:: The ``max`` and ``min`` arguments appear", "maximum phase must be described on the interval [0, 1).", "times for ``targets`` and ``observer``. Cache the result on the", "target. .. note:: This can misbehave if you try to", "grids the constraint result with targets along the first index", "# it returns the Sun's coordinates in an observer #", "Constrain the fractional illumination of the Earth's moon. Constraint is", "else: # assume targets is a string. targkey = (targets,)", "as ``targets`` for whether or not each target is observable", "``time_range`` given constraints in ``constraints_list`` for a particular ``observer``. Parameters", "or not the target is ever observable at each time,", "make a tuple from times try: timekey = tuple(times.jd) +", "\"\"\" if not hasattr(observer, '_meridian_transit_cache'): observer._meridian_transit_cache = {} # convert", "Maximum acceptable separation between Sun and target (inclusive). `None` indicates", "altaz=altaz) finally: if force_zero_pressure: observer.pressure = observer_old_pressure return observer._altaz_cache[aakey] def", "\"\"\" Constrain the observable hours. \"\"\" def __init__(self, min=None, max=None):", "``constraints``. Parameters ---------- constraints : list or `~astroplan.constraints.Constraint` Observational constraint(s)", "and target (inclusive). `None` indicates no limit. \"\"\" self.min =", "= observer.target_meridian_transit_time(times, targets) observer._meridian_transit_cache[aakey] = dict(times=meridian_transit_times) return observer._meridian_transit_cache[aakey] @abstractmethod class", "those times. 
\"\"\" if not hasattr(observer, '_altaz_cache'): observer._altaz_cache = {}", "frame data should be unique and is # quicker than", "self.max mi = 1 if self.min is None else self.min", "1 is the max. \"\"\" def __init__(self, min=None, max=None, boolean_constraint=True):", "min self.max = max def compute_constraint(self, times, observer, targets): separation", "raise TypeError(\"Time limits must be specified as \" \"astropy.time.Time objects.\")", "get_skycoord __all__ = [\"AltitudeConstraint\", \"AirmassConstraint\", \"AtNightConstraint\", \"is_observable\", \"is_always_observable\", \"time_grid_from_range\", \"GalacticLatitudeConstraint\",", "1). To constrain observations on orbital phases between 0.6 and", "throughout ``time_range`` given constraints in the ``constraints_list`` for a particular", "= cached_altaz['altaz'].alt if self.boolean_constraint: lowermask = self.min <= alt uppermask", "= Observer.at_site(\"Subaru\") >>> t1 = Time(\"2016-03-28T12:00:00\") >>> t2 = Time(\"2016-03-30T12:00:00\")", "sequence. time_grid_resolution : `~astropy.units.quantity` Time-grid spacing grid_times_targets : bool if", "`None` indicates no limit. min : float or `None` Minimum", "from astroplan import Observer >>> from astroplan.constraints import LocalTimeConstraint >>>", "to test the constraint time_range : `~astropy.time.Time` (optional) Lower and", "`~astropy.time.Time` Array of times on which to test the constraint", "altitude than airmass=2:: AirmassConstraint(2) \"\"\" def __init__(self, max=None, min=1, boolean_constraint=True):", "self.min <= alt uppermask = alt <= self.max return lowermask", "`~astroplan.constraints.MoonIlluminationConstraint` with defaults of a minimum of 0.65 and no", "\"is_event_observable\"] _current_year = time.localtime().tm_year # needed for backward compatibility _current_year_time_range", "max=None, boolean_constraint=True): if min is None: self.min = -90*u.deg else:", "Constrain the distance between the Earth's moon and some targets.", "moon_separation elif self.min is not None and self.max is not", "is # quicker than accessing the ra attribute. targkey =", "maximum time.\") if self.min is not None: if not isinstance(self.min,", "0.5 hours. Returns ------- observability_table : `~astropy.table.Table` A Table containing", "observer, force_zero_pressure=False): \"\"\" Calculate moon altitude az and illumination for", "------- constraint_result : 1D or 2D array of float or", "ABCMeta, abstractmethod import datetime import time import warnings # Third-party", "= altaz.alt # cache the altitude observer._altaz_cache[aakey] = dict(times=times, altitude=altitude)", "+ times.shape except BaseException: # must be scalar timekey =", "lowermask & uppermask else: return max_best_rescale(alt, self.min, self.max) class AirmassConstraint(AltitudeConstraint):", "time_grid_resolution=0.5*u.hour): \"\"\" Determines which month the specified ``targets`` are observable", "or `None` (optional) Minimum acceptable separation between moon and target", "= set([t.datetime.month for t in times[observable]]) months_observable.append(s) return months_observable def", "doctest: +FLOAT_CMP array([ 0. , 0. , 0.2, 0.4, 0.8,", "else: min_time = self.min = datetime.time(0, 0, 0) if self.max", "Parameters ---------- periodic_event : `~astroplan.periodic.PeriodicEvent` or subclass System on which", "array of times for ``observer``. 
Cache the result on the", "must be specified as datetime.time objects.\") def compute_constraint(self, times, observer,", "constraint returns a float on [0, 1], where 0 is", "one set per target. These integers are 1-based so that", "moon. Constraint is also satisfied if the Moon has set.", "which to test the constraint. targets : `~astropy.coordinates.SkyCoord` Target or", "Minimum acceptable Galactic latitude of target (inclusive). `None` indicates no", "self.min is not None: if not isinstance(self.min, Time): raise TypeError(\"Time", "fractional illumination (inclusive). `None` indicates no limit. max : float", "on the interval [0, 1). To constrain observations on orbital", "disables gridding and re-shapes the coords themselves # prior to", "= min self.max = max def compute_constraint(self, times, observer, targets):", "astronomical twilights (-18 degrees). \"\"\" return cls(max_solar_altitude=-18*u.deg, **kwargs) def _get_solar_altitudes(self,", "need to be rescaled to be between 0 and 1", "not None: mask = self.min <= solar_separation elif self.min is", "moon.separation(targets) is NOT the same as targets.separation(moon) # the former", "be an eclipsing or non-eclipsing binary, or exoplanet system. min", "is as-seen # by the observer. # 'get_sun' returns ICRS", "timezone=\"US/Hawaii\") >>> # bound times between 23:50 and 04:08 local", "= as_strided(x, shape=shp2, strides=[0] * len(shp2)) output_shape = np.broadcast(a, b).shape", "& (solar_separation <= self.max)) else: raise ValueError(\"No max and/or min", "= self.max else: max_time = datetime.time(23, 59, 59) # If", "targets=None): phase = self.periodic_event.phase(times) mask = np.where(self.max > self.min, (phase", "def __call__(self, observer, targets, times=None, time_range=None, time_grid_resolution=0.5*u.hour, grid_times_targets=False): \"\"\" Compute", "interval [0, 1). Default is zero. max : float (optional)", "upper limit on the airmass (``max``) and not the lower", "& (phase <= self.max), (phase >= self.min) | (phase <=", "def is_always_observable(constraints, observer, targets, times=None, time_range=None, time_grid_resolution=0.5*u.hour): \"\"\" A function", "tab['time observable'] = tab['fraction of time observable'] * 24*u.hour tab.meta['times']", "(optional) Minimum acceptable separation between Sun and target (inclusive). `None`", "t.time() <= max_time for t in times.datetime]) except BaseException: #", "output has the same shape as would result from #", "Often, we wish to store expensive calculations for a combination", "contains the times for the computations, (2) 'altaz' contains the", "Parameters ---------- min : `~astropy.units.Quantity` or `None` Minimum altitude of", "outside). If False, the constraint returns a float on [0,", "which to apply the constraints targets : sequence of `~astroplan.Target`", "the time range given the constraints. 
\"\"\" if not hasattr(constraints,", "<= self.max)) & moon_up_mask else: raise ValueError(\"No max and/or min", "= _make_cache_key(times, 'sun') if aakey not in observer._altaz_cache: try: if", "None: times = time_grid_from_range(time_range, time_resolution=time_grid_resolution) if grid_times_targets: targets = get_skycoord(targets)", "<= secz) & (secz <= self.max) else: raise ValueError(\"No max", "in zip(targets, constraint_arr): s = set([t.datetime.month for t in times[observable]])", "mask = self.min <= secz elif self.min is not None", "self.eclipsing_system.in_primary_eclipse(times) return mask class SecondaryEclipseConstraint(Constraint): \"\"\" Constrain observations to times", "equal 0 and those equal to ``max_val`` equal 1 Examples", "an array of times for ``targets`` and ``observer``. Cache the", "1 should be disregarded return min_best_rescale(secz, mi, mx, less_than_min=0) class", "(inclusive). `None` indicates no limit. max : `~astropy.units.Quantity` or `None`", "if self.max is not None: max_time = self.max else: max_time", "self.max) class AirmassConstraint(AltitudeConstraint): \"\"\" Constrain the airmass of a target.", "if output_shape != np.array(result).shape: result = np.broadcast_to(result, output_shape) return result", "Returns ------- constraint_result : 2D array of float or bool", "\"\"\" from __future__ import (absolute_import, division, print_function, unicode_literals) # Standard", "along the first index and times along the second. \"\"\"", "min if min is not None else 0.0 self.max =", "sequence If ``time_range`` is not specified, defaults to current year", "np.where(self.max > self.min, (phase >= self.min) & (phase <= self.max),", "dict(times=times, altitude=altitude) finally: if self.force_pressure_zero: observer.pressure = observer_old_pressure else: altitude", "the target is ever observable at each time, given the", "\"\"\" def __init__(self, max=None, min=1, boolean_constraint=True): self.min = min self.max", "``observer``, given the supplied ``constraints``. Parameters ---------- constraints : list", "<= illumination) & (illumination <= self.max)) & moon_up_mask else: raise", "observable, one set per target. These integers are 1-based so", "between astronomical twilights (-18 degrees). \"\"\" return cls(max_solar_altitude=-18*u.deg, **kwargs) def", "2, 3, 0]) >>> min_best_rescale(airmasses, 1, 2.25, less_than_min = 0)", "observer, targets): # removed the location argument here, which causes", "to 1, February maps to 2, etc. \"\"\" # TODO:", "times.shape, targets.shape x = np.array([1]) a = as_strided(x, shape=shp1, strides=[0]", "the separation is as-seen # by the observer. # 'get_sun'", "max_solar_altitude : `~astropy.units.Quantity` The altitude of the sun below which", "`~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target or list of targets. observer : `~astroplan.Observer`", "lowermask = self.min <= alt uppermask = alt <= self.max", "binaries). 
\"\"\" def __init__(self, periodic_event, min=None, max=None): \"\"\" Parameters ----------", "self.min = min if min is not None else 0.0", "self.max = max if max is not None else 1.0", "`~astropy.time.Time` (optional) Lower and upper bounds on time sequence, with", "< max_time: try: mask = np.array([min_time <= t.time() <= max_time", "None: applied_constraints = [constraint(observer, target, times=times, grid_times_targets=True) for constraint in", "set([t.datetime.month for t in times[observable]]) months_observable.append(s) return months_observable def observability_table(constraints,", "------- observability_table : `~astropy.table.Table` A Table containing the observability information", "observable'``, ``'always observable'``, and ``'fraction of time observable'``. The column", "the values that need to be rescaled to be between", ": `~astropy.units.Quantity` or `None` Maximum altitude of the target (inclusive).", "# must be scalar timekey = (times.jd,) # make hashable", "if the Moon has set. \"\"\" def __init__(self, min=None, max=None,", "and those equal to ``max_val`` equal 1 Examples -------- rescale", "@abstractmethod class Constraint(object): \"\"\" Abstract class for objects defining observational", "if min_time < max_time: try: mask = np.array([min_time <= t.time()", "If 2D with targets along the first index and times", "tuple(times.jd) + times.shape except BaseException: # must be scalar timekey", "if the ``targets`` are observable during ``time_range`` given constraints in", "from astroplan.constraints import min_best_rescale >>> import numpy as np >>>", "self.min is None else self.min # values below 1 should", "with targets along the first index and times along the", "`None` indicates no limit. Examples -------- Constrain the observations to", ": float or `None` (optional) Minimum acceptable fractional illumination (inclusive).", "from astropy.coordinates import get_body, get_sun, get_moon, Galactic, SkyCoord from astropy", "grid_times_targets: targets = get_skycoord(targets) # TODO: these broadcasting operations are", "moon_alt >= 0 illumination = cached_moon['illum'] if self.min is None", "linearly-spaced times separated by ``time_resolution``. Default is 0.5 hours. Returns", "observer._moon_cache[aakey] def _get_meridian_transit_times(times, observer, targets): \"\"\" Calculate next meridian transit", "\"\"\" Constrain the Sun to be below ``horizon``. \"\"\" @u.quantity_input(horizon=u.deg)", "next meridian transit for an array of times for ``targets``", "coordinates in an observer # centred frame, so the separation", "return observer._moon_cache[aakey] def _get_meridian_transit_times(times, observer, targets): \"\"\" Calculate next meridian", "which month the specified ``targets`` are observable for a specific", "indicates no limit. min : float or `None` Minimum airmass", "the computations, (2) 'altaz' contains the corresponding alt/az coordinates at", "here # moon.separation(targets) is NOT the same as targets.separation(moon) #", "for all the ``targets`` over the requested ``time_range``, given the", "that ``vals`` equal to ``min_val`` equal 0 and those equal", "in \" \"GalacticLatitudeConstraint.\") return mask class SunSeparationConstraint(Constraint): \"\"\" Constrain the", "must be in primary eclipse. 
\"\"\" self.eclipsing_system = eclipsing_system def", "not isinstance(self.min, datetime.time): raise TypeError(\"Time limits must be specified as", "observer, targets, time_range=_current_year_time_range, time_grid_resolution=0.5*u.hour): \"\"\" Determines which month the specified", "BOTH TIMES AND TIME_RANGE ARE SET? time_range : `~astropy.time.Time` (length", "observability for all the ``targets`` over the requested ``time_range``, given", "all the ``targets`` over the requested ``time_range``, given the constraints", "limits must be specified as datetime.time objects.\") if self.max is", "attribute of the frame data should be unique and is", "return max_best_rescale(alt, self.min, self.max) class AirmassConstraint(AltitudeConstraint): \"\"\" Constrain the airmass", "a = as_strided(x, shape=shp1, strides=[0] * len(shp1)) b = as_strided(x,", "= [target.name for target in targets] ever_obs = np.any(constraint_arr, axis=1)", ": `~astropy.time.Time` (length = 2) Lower and upper bounds on", "compute_constraint(self, times, observer, targets): with warnings.catch_warnings(): warnings.simplefilter('ignore') min_time = Time(\"1950-01-01T00:00:00\")", "= np.bool_(min_time <= times.datetime.time() <= max_time) # If time boundaries", "the constraint. targets : `~astropy.coordinates.SkyCoord` Target or list of targets.", "(optional) Array of ingress and egress times for ``N`` events,", "result = self.compute_constraint(times, observer, targets) # make sure the output", "constraints. \"\"\" if not hasattr(constraints, '__len__'): constraints = [constraints] applied_constraints", ">>> constraint = PhaseConstraint(binary, min=0.6, max=0.2) \"\"\" self.periodic_event = periodic_event", "(1) and worst (2.25). All values outside the range should", "(phase <= self.max), (phase >= self.min) | (phase <= self.max))", "vals : array-like the values that need to be rescaled", "from targets coords try: if hasattr(targets, 'frame'): # treat as", "and upper bounds on time sequence If ``time_range`` is not", "secz <= self.max elif self.max is None and self.min is", "max if max is not None else 1.0 def compute_constraint(self,", "targets, grid_times_targets=False) observer._altaz_cache[aakey] = dict(times=times, altaz=altaz) finally: if force_zero_pressure: observer.pressure", "use 0 pressure. Returns ------- altaz_dict : dict Dictionary containing", "times during secondary eclipse. \"\"\" def __init__(self, eclipsing_system): \"\"\" Parameters", "not in observer._moon_cache: try: if force_zero_pressure: observer_old_pressure = observer.pressure observer.pressure", "for ``N`` events, with shape (``N``, 2). Returns ------- event_observable", "along the second. \"\"\" if times is None and time_range", "warnings.catch_warnings(): warnings.simplefilter('ignore') min_time = Time(\"1950-01-01T00:00:00\") if self.min is None else", "time (inclusive). `None` indicates no limit. Examples -------- Constrain the", "altitude calculations. applied_constraints = [constraint(observer, targets, times=times, grid_times_targets=True) for constraint", "at these times altaz = observer.altaz(times, get_sun(times)) altitude = altaz.alt", "on broadcasting the shapes together using standard numpy rules. Returns", "or `None` Minimum altitude of the target (inclusive). 
`None` indicates", "return altitude def compute_constraint(self, times, observer, targets): solar_altitude = self._get_solar_altitudes(times,", "np.sum(constraint_arr, axis=1) / constraint_arr.shape[1] tab = table.Table(names=colnames, data=[target_names, ever_obs, always_obs,", ".moon import moon_illumination from .utils import time_grid_from_range from .target import", "in constraints] applied_constraints_egr = [constraint(observer, target, times=times_egr, grid_times_targets=True) for constraint", "is not None: times = time_grid_from_range(time_range, time_resolution=time_grid_resolution) if is_24hr_table: tab['time", "_make_cache_key(times, targets) if aakey not in observer._meridian_transit_cache: meridian_transit_times = observer.target_meridian_transit_time(times,", "= tuple(targets.frame.data.lon.value.ravel()) + targets.shape else: # assume targets is a", "_make_cache_key(times, 'moon') if aakey not in observer._moon_cache: try: if force_zero_pressure:", "`~astroplan.Target` The targets on which to apply the constraints. Returns", "cases it should return zero) Returns ------- array of floats", "each other if targets is not None: # broadcasting times", "1.2, for example, you should subtract one from the second", "`None` indicates no limit. max : `~astropy.units.Quantity` or `None` Maximum", "min) in this initializer to support the common case for", "by dropping to the trigonometric # altitude calculations. applied_constraints =", "max=None): \"\"\" Parameters ---------- min : `~astropy.time.Time` Earliest time (inclusive).", "= ((self.min <= illumination) & (illumination <= self.max)) & moon_up_mask", "time, given the constraints. \"\"\" if not hasattr(constraints, '__len__'): constraints", "the moon coord # which is GCRS, and that is", "observer who has constraints ``constraints``. force_zero_pressure : bool Forcefully use", "returns a float on [0, 1], where 0 is the", "vals > max_val rescaled[below] = 0 rescaled[above] = greater_than_max return", "np from numpy.lib.stride_tricks import as_strided # Package from .moon import", "``'target name'``, ``'ever observable'``, ``'always observable'``, and ``'fraction of time", "ever observable at each time, given the constraints. \"\"\" if", "is zero. max : float (optional) Maximum phase (inclusive) on", "of targets Returns ------- time_dict : dict Dictionary containing a", "booleans of same length as ``times`` for whether or not", "here, which causes small <1 deg # innacuracies, but it", "see LICENSE.rst \"\"\" Specify and constraints to determine which targets", "indicates no limit. \"\"\" self.min = min self.max = max", "0 pressure. Returns ------- moon_dict : dict Dictionary containing three", "can misbehave if you try to constrain negative altitudes, as", "`None` (optional) Minimum acceptable separation between Sun and target (inclusive).", ": dict Dictionary containing two key-value pairs. (1) 'times' contains", "2\", i.e. at a higher altitude than airmass=2:: AirmassConstraint(2) \"\"\"", "altaz_dict : dict Dictionary containing two key-value pairs. (1) 'times'", "= get_skycoord(targets) # TODO: these broadcasting operations are relatively slow", ">>> subaru = Observer.at_site(\"Subaru\", timezone=\"US/Hawaii\") >>> # bound times between", "by default). \"\"\" self.min = min self.max = max self.ephemeris", "astropy.coordinates import get_body, get_sun, get_moon, Galactic, SkyCoord from astropy import", "Constraint(object): \"\"\" Abstract class for objects defining observational constraints. 
\"\"\"", "np.broadcast(a, b).shape if output_shape != np.array(result).shape: result = np.broadcast_to(result, output_shape)", "times.datetime tab.meta['observer'] = observer tab.meta['constraints'] = constraints return tab def", "1, with the best (1) and worst (2.25). All values", "tuple(targets.frame.data.lon.value.ravel()) + targets.shape else: # assume targets is a string.", "of a minimum of 0.25 and a maximum of 0.65", "(max_val - min_val) below = vals < min_val above =", "Consider nighttime as time between civil twilights (-6 degrees). \"\"\"", "or list of targets. observer : `~astroplan.Observer` The observer who", "in \" \"AirmassConstraint.\") return mask else: if self.max is None:", "specified in \" \"SunSeparationConstraint.\") return mask class MoonSeparationConstraint(Constraint): \"\"\" Constrain", "times to compute the constraint. WHAT HAPPENS WHEN BOTH TIMES", ">= self.min) & (phase <= self.max), (phase >= self.min) |", ">= separation elif self.max is None and self.min is not", "import Time >>> import astropy.units as u >>> binary =", "return cls(min, max, **kwargs) @classmethod def grey(cls, min=0.25, max=0.65, **kwargs):", "standard numpy rules. Returns ------- constraint_result : 1D or 2D", "contains the corresponding alt/az coordinates at those times and (3)", "\"\"\" Consider nighttime as time between astronomical twilights (-18 degrees).", "self.max = max def compute_constraint(self, times, observer, targets): # use", "self.min = min self.max = max self.boolean_constraint = boolean_constraint def", "= (targets,) except BaseException: targkey = (targets.frame.data.lon,) return timekey +", "ephemeris=self.ephemeris) # note to future editors - the order matters", "[0, 1). Default is one. Examples -------- To constrain observations", "else: mx = self.max mi = 1 if self.min is", "get_moon(times, ephemeris=self.ephemeris) # note to future editors - the order", "the times for the alt/az computations, (2) 'altaz' contains the", "applied_constraints = [constraint(observer, targets, times=times, time_range=time_range, time_grid_resolution=time_grid_resolution, grid_times_targets=True) for constraint", "but it is needed until astropy PR #5897 is released", "each month that a target is observable, one set per", "A function to determine whether ``targets`` are always observable throughout", "1 inclusive rescaled so that ``vals`` equal to ``min_val`` equal", "Dictionary containing three key-value pairs. (1) 'times' contains the times", "None: if not isinstance(self.min, Time): raise TypeError(\"Time limits must be", "and self.max is not None: mask = self.max >= moon_separation", "self._get_solar_altitudes(times, observer, targets) mask = solar_altitude <= self.max_solar_altitude return mask", "time_grid_resolution : `~astropy.units.quantity` Time-grid spacing grid_times_targets : bool if True,", "is specified, determine whether constraints are met between test times", "times on which to test the constraint time_range : `~astropy.time.Time`", "always observable throughout ``time_range`` given constraints in the ``constraints_list`` for", "observer_old_pressure = observer.pressure observer.pressure = 0 altaz = observer.altaz(times, targets,", "so that January maps to 1, February maps to 2,", "these calculations in a dictionary. Parameters ---------- times : `~astropy.time.Time`", "the fractional illumination of the Earth's moon. 
Constraint is also", "< 0) or (max > 1): raise ValueError('The minimum of", "times to tuple for hashing aakey = _make_cache_key(times, 'moon') if", "in the altitude of the Sun that can occur when", "import as_strided # Package from .moon import moon_illumination from .utils", "min : `~astropy.units.Quantity` or `None` Minimum altitude of the target", "None and self.min is not None: mask = (self.min <=", "self.max.tzinfo if timezone is None: timezone = observer.timezone if self.min", "min=None, max=None): \"\"\" Parameters ---------- periodic_event : `~astroplan.periodic.PeriodicEvent` or subclass", "0) or (min > 1) or (max < 0) or", "during ``time_range`` given constraints in ``constraints_list`` for a particular ``observer``.", "[constraint(observer, target, times=times, grid_times_targets=True) for constraint in constraints] constraint_arr =", "the supplied ``constraints``). \"\"\" if not hasattr(constraints, '__len__'): constraints =", "SkyCoord object. Accessing the longitude # attribute of the frame", "for backward compatibility [str(_current_year) + '-01-01', str(_current_year) + '-12-31'] )", "v targets is slow due to # complex nature of", "eclipse. \"\"\" def __init__(self, eclipsing_system): \"\"\" Parameters ---------- eclipsing_system :", "passed as the first argument into `~astroplan.time_grid_from_range`. If a single", "These integers are 1-based so that January maps to 1,", "0. >>> from astroplan.constraints import min_best_rescale >>> import numpy as", "PR #5897 is released # which should be astropy 1.3.2", "be astropy 1.3.2 moon = get_moon(times, ephemeris=self.ephemeris) # note to", "targets): \"\"\" Make a unique key to reference this combination", "(``N``, 2). Returns ------- event_observable : `~numpy.ndarray` Array of booleans", "target is ever observable at each time, given the constraints.", "targets) if aakey not in observer._altaz_cache: try: if force_zero_pressure: observer_old_pressure", "= [\"AltitudeConstraint\", \"AirmassConstraint\", \"AtNightConstraint\", \"is_observable\", \"is_always_observable\", \"time_grid_from_range\", \"GalacticLatitudeConstraint\", \"SunSeparationConstraint\", \"MoonSeparationConstraint\",", "max=0.6) The minimum and maximum phase must be described on", "\"\"\" # TODO: This method could be sped up a", "`~astroplan.Observer` object), and ``'constraints'`` (containing the supplied ``constraints``). \"\"\" if", "0.8, 1. ]) \"\"\" rescaled = (vals - min_val) /", "of a minimum of 0.65 and no maximum Parameters ----------", "(optional) Maximum phase (inclusive) on interval [0, 1). Default is", "time limits. An example use case for this class would", "= np.broadcast(a, b).shape if output_shape != np.array(result).shape: result = np.broadcast_to(result,", "huge speedup if the end user # disables gridding and", "worst (2.25). All values outside the range should return 0.", "# quicker than accessing the ra attribute. targkey = tuple(targets.frame.data.lon.value.ravel())", "be passed as the first argument into `~astroplan.time_grid_from_range`. If a", "or (min > 1) or (max < 0) or (max", "1-based so that January maps to 1, February maps to", "relatively slow # but there is potential for huge speedup", "= datetime.time(0, 0, 0) if self.max is not None: max_time", "limit on the airmass (``max``) and not the lower limit.", "constraint_arr = np.logical_and.reduce(applied_constraints) else: times_ing = times_ingress_egress[:, 0] times_egr =", "or 2D array of float or bool The constraints. 
If", "not None: mask = ((self.min <= separation) & (separation <=", "t in times.datetime]) except BaseException: mask = np.bool_((times.datetime.time() >= min_time)", "moon illumination for those times. \"\"\" if not hasattr(observer, '_moon_cache'):", "# but there is potential for huge speedup if the", "target. `None` indicates no limit. min : float or `None`", "ever observable in the time range given the constraints. \"\"\"", "targets, times=None, time_range=None, time_grid_resolution=0.5*u.hour): \"\"\" Creates a table with information", "> max_val rescaled[below] = 0 rescaled[above] = greater_than_max return rescaled", "a unique key to reference this combination of ``times`` and", "same length as ``targets`` for whether or not each target", "is not specified, defaults to current year (localtime) time_grid_resolution :", "hasattr(time_range, 'isscalar') and time_range.isscalar: time_range = (time_range-12*u.hour, time_range+12*u.hour) is_24hr_table =", ": `~datetime.time` Earliest local time (inclusive). `None` indicates no limit.", "compute_constraint(self, times, observer=None, targets=None): mask = self.eclipsing_system.in_primary_eclipse(times) return mask class", "self.max is not None: mask = (self.max >= illumination) |", "from .utils import time_grid_from_range from .target import get_skycoord __all__ =", "Moon has set. \"\"\" def __init__(self, min=None, max=None, ephemeris=None): \"\"\"", ": `~astropy.time.Time` Latest time (inclusive). `None` indicates no limit. Examples", "unique key to reference this combination of ``times`` and ``targets``.", "\"\"\" Parameters ---------- max_solar_altitude : `~astropy.units.Quantity` The altitude of the", "store expensive calculations for a combination of ``targets`` and ``times``", "scalar. It also contains metadata entries ``'times'`` (with an array", "of the target. `None` indicates no limit. boolean_contstraint : bool", "compute the constraint. WHAT HAPPENS WHEN BOTH TIMES AND TIME_RANGE", "TypeError(\"Time limits must be specified as datetime.time objects.\") if self.max", "case for this class would be to associate an acceptable", "compatibility [str(_current_year) + '-01-01', str(_current_year) + '-12-31'] ) def _make_cache_key(times,", "so the separation is as-seen # by the observer. #", "the constraint is treated as a boolean (True for within", "of the sun below which it is considered to be", "# centred frame, so the separation is as-seen # by", "observer.moon_altaz(times) illumination = np.array(moon_illumination(times)) observer._moon_cache[aakey] = dict(times=times, illum=illumination, altaz=altaz) finally:", "observer, targets) mask = solar_altitude <= self.max_solar_altitude return mask class", "a minimum of 0.65 and no maximum Parameters ---------- min", "eclipsing_system def compute_constraint(self, times, observer=None, targets=None): mask = self.eclipsing_system.in_secondary_eclipse(times) return", "SunSeparationConstraint(Constraint): \"\"\" Constrain the distance between the Sun and some", "or `None` (optional) Maximum acceptable fractional illumination (inclusive). `None` indicates", "considered to be \"night\" (inclusive). 
force_pressure_zero : bool (optional) Force", "separation = abs(targets.transform_to(Galactic).b) if self.min is None and self.max is", "in the frame of the moon coord # which is", "minimum of 0.25 and a maximum of 0.65 Parameters ----------", "= np.any(constraint_arr, axis=1) always_obs = np.all(constraint_arr, axis=1) frac_obs = np.sum(constraint_arr,", "(optional) Force the pressure to zero for solar altitude calculations.", "(1) 'times' contains the times for the computations, (2) 'altaz'", "of all the times), ``'observer'`` (the `~astroplan.Observer` object), and ``'constraints'``", "these broadcasting operations are relatively slow # but there is", "in times[observable]]) months_observable.append(s) return months_observable def observability_table(constraints, observer, targets, times=None,", "the target and it's observability: ``'target name'``, ``'ever observable'``, ``'always", "to ``max_val`` equal 1 Examples -------- rescale an array of", "# use np.bool so shape queries don't cause problems mask", "as \" \"astropy.time.Time objects.\") if self.max is not None: if", "(optional) Lower and upper bounds on time sequence, with spacing", "limit. Parameters ---------- max : float or `None` Maximum airmass", "observing blocks are valid over the time limits used in", "the end user # disables gridding and re-shapes the coords", "targets) if aakey not in observer._meridian_transit_cache: meridian_transit_times = observer.target_meridian_transit_time(times, targets)", "Sun and some targets. \"\"\" def __init__(self, min=None, max=None): \"\"\"", "to compute the constraint. WHAT HAPPENS WHEN BOTH TIMES AND", "be passed as the first argument into `~astroplan.time_grid_from_range`. time_grid_resolution :", "This routine will provide an appropriate, hashable, key to store", "Compute the constraint for this class Parameters ---------- observer :", "it returns the Sun's coordinates in an observer # centred", "which must be in secondary eclipse. \"\"\" self.eclipsing_system = eclipsing_system", "except BaseException: targkey = (targets.frame.data.lon,) return timekey + targkey def", "times and (3) contains the moon illumination for those times.", "[constraint(observer, target, times=times_egr, grid_times_targets=True) for constraint in constraints] constraint_arr =", "+ '-01-01', str(_current_year) + '-12-31'] ) def _make_cache_key(times, targets): \"\"\"", "should be unique and is # quicker than accessing the", "class SunSeparationConstraint(Constraint): \"\"\" Constrain the distance between the Sun and", "None: timezone = observer.timezone if self.min is not None: min_time", "and the ``max_val`` goes to zero. Parameters ---------- vals :", "license - see LICENSE.rst \"\"\" Specify and constraints to determine", "True applied_constraints = [constraint(observer, targets, times=times, time_range=time_range, time_grid_resolution=time_grid_resolution, grid_times_targets=True) for", "initializer to support the common case for users who care", "def __init__(self, min=None, max=None): \"\"\" Parameters ---------- min : `~datetime.time`", "is None and self.max is not None: mask = self.max", "system could be an eclipsing or non-eclipsing binary, or exoplanet", "constraint_arr): s = set([t.datetime.month for t in times[observable]]) months_observable.append(s) return", "below ``horizon``. \"\"\" @u.quantity_input(horizon=u.deg) def __init__(self, max_solar_altitude=0*u.deg, force_pressure_zero=True): \"\"\" Parameters", "together using standard numpy rules. 
Returns ------- constraint_result : 1D", "observer, targets) alt = cached_altaz['altaz'].alt if self.boolean_constraint: lowermask = self.min", "= observer.altaz(times, get_sun(times)) altitude = altaz.alt # cache the altitude", "= dict(times=times, illum=illumination, altaz=altaz) finally: if force_zero_pressure: observer.pressure = observer_old_pressure", "max=None): \"\"\" Parameters ---------- min : `~datetime.time` Earliest local time", "eclipsing binaries). \"\"\" def __init__(self, periodic_event, min=None, max=None): \"\"\" Parameters", "1. , 0.6, 0.2, 0. , 0. ]) \"\"\" rescaled", "0) or (max > 1): raise ValueError('The minimum of the", "* 24*u.hour tab.meta['times'] = times.datetime tab.meta['observer'] = observer tab.meta['constraints'] =", "and self.max is not None: mask = self.max >= separation", "observer_old_pressure return observer._moon_cache[aakey] def _get_meridian_transit_times(times, observer, targets): \"\"\" Calculate next", "0 and those equal to ``max_val`` equal 1 Examples --------", "Parameters ---------- times : `~astropy.time.Time` The times to compute the", "backward compatibility [str(_current_year) + '-01-01', str(_current_year) + '-12-31'] ) def", "information about observability for all the ``targets`` over the requested", "return rescaled def max_best_rescale(vals, min_val, max_val, greater_than_max=1): \"\"\" rescales an", "def compute_constraint(self, times, observer, targets): # first is the moon", "min is not None else 0.0 self.max = max if", "in observer._meridian_transit_cache: meridian_transit_times = observer.target_meridian_transit_time(times, targets) observer._meridian_transit_cache[aakey] = dict(times=meridian_transit_times) return", "should return 0. >>> from astroplan.constraints import min_best_rescale >>> import", "self.min is not None: mask = self.min <= separation elif", "np.newaxis] times, targets = observer._preprocess_inputs(times, targets, grid_times_targets=False) result = self.compute_constraint(times,", "coordinates at those times and (3) contains the moon illumination", "return timekey + targkey def _get_altaz(times, observer, targets, force_zero_pressure=False): \"\"\"", "(2) 'altaz' contains the corresponding alt/az coordinates at those times", "`~astropy.units.Quantity` or `None` (optional) Maximum acceptable separation between moon and", "observer, targets): if not hasattr(observer, '_altaz_cache'): observer._altaz_cache = {} aakey", "interval [0, 1). To constrain observations on orbital phases between", "to determine whether ``targets`` are always observable throughout ``time_range`` given", "``time_range`` by checking constraint at linearly-spaced times separated by ``time_resolution``.", "<= max_time) for t in times.datetime]) except BaseException: mask =", "**kwargs): \"\"\" Consider nighttime as time between astronomical twilights (-18", "of altitudes to be between 0 and 1, with the", "as dt >>> subaru = Observer.at_site(\"Subaru\", timezone=\"US/Hawaii\") >>> # bound", "0 pressure. Returns ------- altaz_dict : dict Dictionary containing two", "constraints are met between test times in ``time_range`` by checking", "the ra attribute. targkey = tuple(targets.frame.data.lon.value.ravel()) + targets.shape else: #", "specific ``observer``, given the supplied ``constraints``. Parameters ---------- constraints :", ": list List of sets of unique integers representing each", "constraint in constraints] constraint_arr = np.logical_and.reduce(applied_constraints) colnames = ['target name',", "altitude of the target. .. 
note:: This can misbehave if", "ICRS coords. sun = get_body('sun', times, location=observer.location) solar_separation = sun.separation(targets)", "one), where the ``min_val`` goes to one, and the ``max_val``", "the times for the computations, (2) 'altaz' contains the corresponding", "= observer.timezone if self.min is not None: min_time = self.min", "reference this combination of ``times`` and ``targets``. Often, we wish", "observer # centred frame, so the separation is as-seen #", "# removed the location argument here, which causes small <1", "targets=None): mask = self.eclipsing_system.in_secondary_eclipse(times) return mask class PhaseConstraint(Constraint): \"\"\" Constrain", "PeriodicEvent(epoch=Time('2017-01-01 02:00'), period=1*u.day) >>> constraint = PhaseConstraint(binary, min=0.4, max=0.6) The", "shape and # broadcast these to find the correct shape", "'__len__'): constraints = [constraints] times = time_grid_from_range(time_range, time_grid_resolution) # TODO:", "bool If True, the constraint is treated as a boolean", "else: raise ValueError(\"No max and/or min specified in \" \"GalacticLatitudeConstraint.\")", "alt = cached_altaz['altaz'].alt if self.boolean_constraint: lowermask = self.min <= alt", "an appropriate, hashable, key to store these calculations in a", "or a \" \"maximum time.\") if self.min is not None:", "= PeriodicEvent(epoch=Time('2017-01-01 02:00'), period=1*u.day) >>> constraint = PhaseConstraint(binary, min=0.4, max=0.6)", "times = time_grid_from_range(time_range, time_resolution=time_grid_resolution) if grid_times_targets: targets = get_skycoord(targets) #", ": sequence of `~astroplan.Target` The targets on which to apply", "if self.min is not None: if not isinstance(self.min, Time): raise", "s = set([t.datetime.month for t in times[observable]]) months_observable.append(s) return months_observable", "`None` (optional) Minimum acceptable separation between moon and target (inclusive).", "The targets on which to apply the constraints. Returns -------", "ephemeris=None): \"\"\" Parameters ---------- min : `~astropy.units.Quantity` or `None` (optional)", "returned for ``vals`` below ``min_val``. (in some cases anything less", "``max_val`` equal 1 Examples -------- rescale an array of altitudes", "``times``, given constraints in ``constraints`` for a particular ``observer``. Parameters", "targets : `~astropy.coordinates.SkyCoord` Target or list of targets. Returns -------", "binary, or exoplanet system. min : float (optional) Minimum phase", "below = vals < min_val above = vals > max_val", "be a score (between zero and one), where the ``max_val``", "for ``target`` at times linearly spaced between the two times", "force_zero_pressure: observer_old_pressure = observer.pressure observer.pressure = 0 altaz = observer.moon_altaz(times)", "times[observable]]) months_observable.append(s) return months_observable def observability_table(constraints, observer, targets, times=None, time_range=None,", "or list of targets Returns ------- time_dict : dict Dictionary", "is None: self.min = -90*u.deg else: self.min = min if", "0 is the min altitude and 1 is the max.", "latitude of target (inclusive). `None` indicates no limit. max :", "meridian transit for an array of times for ``targets`` and", "minimum and a maximum of 0.25 Parameters ---------- min :", "as time between civil twilights (-6 degrees). 
\"\"\" return cls(max_solar_altitude=-6*u.deg,", "for use as a cache key \"\"\" # make a", "times : `~astropy.time.Time` The times to compute the constraint observer", ": `~astroplan.Observer` the observation location from which to apply the", "bright(cls, min=0.65, max=None, **kwargs): \"\"\" initialize a `~astroplan.constraints.MoonIlluminationConstraint` with defaults", "targets): \"\"\" Calculate next meridian transit for an array of", "AtNightConstraint(Constraint): \"\"\" Constrain the Sun to be below ``horizon``. \"\"\"", ">= moon_separation elif self.max is None and self.min is not", "{} # convert times to tuple for hashing aakey =", "observations on orbital phases between 0.6 and 1.2, for example,", "`~astroplan.time_grid_from_range`. If a single (scalar) time, the table will be", "about the upper limit on the airmass (``max``) and not", "calculations. This avoids errors in the altitude of the Sun", "`~astroplan.periodic.EclipsingSystem` System which must be in secondary eclipse. \"\"\" self.eclipsing_system", "upper bounds on time sequence. time_grid_resolution : `~astropy.units.quantity` Time-grid spacing", "which to apply the constraints. times : `~astropy.time.Time` The times", "import PeriodicEvent >>> from astropy.time import Time >>> import astropy.units", "of the target. `None` indicates no limit. min : float", "the observing time to be within certain time limits. An", "is NOT the same as targets.separation(moon) # the former calculates", "<= max_time for t in times.datetime]) except BaseException: # use", "times, location=observer.location) solar_separation = sun.separation(targets) if self.min is None and", "time_range : `~astropy.time.Time` (optional) Lower and upper bounds on time", "= self.compute_constraint(times, observer, targets) # make sure the output has", ": `~astropy.units.Quantity` or `None` (optional) Minimum acceptable separation between Sun", "'-01-01', str(_current_year) + '-12-31'] ) def _make_cache_key(times, targets): \"\"\" Make", "\"\"\" A function to determine whether ``targets`` are always observable", ": float or `None` Maximum airmass of the target. `None`", "time_range=None, time_grid_resolution=0.5*u.hour): \"\"\" Determines if the ``targets`` are observable during", "times.datetime.time() <= max_time) # If time boundaries straddle midnight: else:", "quicker than accessing the ra attribute. targkey = tuple(targets.frame.data.lon.value.ravel()) +", "None and self.max is not None: mask = self.max >=", "1 what is returned for ``vals`` below ``min_val``. (in some", "range should return 0. >>> from astroplan.constraints import min_best_rescale >>>", "least supply either a minimum or a \" \"maximum time.\")", "for constraint in constraints] constraint_arr = np.logical_and.reduce(applied_constraints) months_observable = []", "about the target and it's observability: ``'target name'``, ``'ever observable'``,", "max and/or min specified in \" \"MoonSeparationConstraint.\") return mask class", "def _get_moon_data(times, observer, force_zero_pressure=False): \"\"\" Calculate moon altitude az and", "_get_moon_data(times, observer, force_zero_pressure=False): \"\"\" Calculate moon altitude az and illumination", "WHEN BOTH TIMES AND TIME_RANGE ARE SET? 
time_range : `~astropy.time.Time`", "tab def min_best_rescale(vals, min_val, max_val, less_than_min=1): \"\"\" rescales an input", "mask = solar_altitude <= self.max_solar_altitude return mask class GalacticLatitudeConstraint(Constraint): \"\"\"", "import get_skycoord __all__ = [\"AltitudeConstraint\", \"AirmassConstraint\", \"AtNightConstraint\", \"is_observable\", \"is_always_observable\", \"time_grid_from_range\",", "make sure the output has the same shape as would", "airmass of the target. `None` indicates no limit. boolean_contstraint :", "ephemeris @classmethod def dark(cls, min=None, max=0.25, **kwargs): \"\"\" initialize a", "else: return max_best_rescale(alt, self.min, self.max) class AirmassConstraint(AltitudeConstraint): \"\"\" Constrain the", "combination of ``times`` and ``targets``. Often, we wish to store", "- max_val) below = vals < min_val above = vals", "mask = self.max >= solar_separation elif self.max is None and", "going to 0. For values outside the range, the rescale", "and self.min is not None: mask = self.min <= secz", "times, observer, targets): \"\"\" Actually do the real work of", "is None and self.max is not None: mask = secz", "None: mask = (self.max >= illumination) | moon_down_mask elif self.max", "TIME_RANGE ARE SET? time_range : `~astropy.time.Time` (length = 2) Lower", "frame, so the separation is as-seen # by the observer.", "None: if not isinstance(self.max, datetime.time): raise TypeError(\"Time limits must be", "illumination = cached_moon['illum'] if self.min is None and self.max is", "observer._altaz_cache[aakey] = dict(times=times, altitude=altitude) finally: if self.force_pressure_zero: observer.pressure = observer_old_pressure", "or non-eclipsing binary, or exoplanet system. min : float (optional)", "length as ``targets`` for whether or not each target is", "scalar timekey = (times.jd,) # make hashable thing from targets", "return 0 below 35 and 1 above 60. >>> from", "with `~astropy.coordinates.solar_system_ephemeris` (which is set to 'builtin' by default). \"\"\"", "atmospheric refraction return nonsense values. \"\"\" self.max_solar_altitude = max_solar_altitude self.force_pressure_zero", "best (60) going to 1 and worst (35) going to", "compatibility _current_year_time_range = Time( # needed for backward compatibility [str(_current_year)", "constraints ``constraints`` targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target or list", "not None: if not isinstance(self.min, Time): raise TypeError(\"Time limits must", "periodic_event : `~astroplan.periodic.PeriodicEvent` or subclass System on which to compute", "0 or 1 what is returned for ``vals`` below ``min_val``.", "test the constraints times_ingress_egress : `~astropy.time.Time` (optional) Array of ingress", "if is_24hr_table: tab['time observable'] = tab['fraction of time observable'] *", "If time boundaries straddle midnight: else: try: mask = np.array([(t.time()", "requested ``time_range``, given the constraints in ``constraints_list`` for ``observer``. 
Parameters", "raise ValueError(\"No max and/or min specified in \" \"MoonSeparationConstraint.\") return", "import time_grid_from_range from .target import get_skycoord __all__ = [\"AltitudeConstraint\", \"AirmassConstraint\",", "`~astropy.table.Table` A Table containing the observability information for each of", "\"GalacticLatitudeConstraint\", \"SunSeparationConstraint\", \"MoonSeparationConstraint\", \"MoonIlluminationConstraint\", \"LocalTimeConstraint\", \"PrimaryEclipseConstraint\", \"SecondaryEclipseConstraint\", \"Constraint\", \"TimeConstraint\", \"observability_table\",", "def compute_constraint(self, times, observer=None, targets=None): mask = self.eclipsing_system.in_primary_eclipse(times) return mask", "max : `~datetime.time` Latest local time (inclusive). `None` indicates no", "columns with information about the target and it's observability: ``'target", ": bool Examples -------- To create a constraint that requires", ">= illumination) | moon_down_mask elif self.max is None and self.min", "`~astropy.time.Time` Earliest time (inclusive). `None` indicates no limit. max :", "the airmass be \"better than 2\", i.e. at a higher", "self.min) & (phase <= self.max), (phase >= self.min) | (phase", "`~numpy.ndarray` Array of booleans of same length as ``times`` for", "self.max is not None: if not isinstance(self.max, datetime.time): raise TypeError(\"Time", "Parameters ---------- constraints : list or `~astroplan.constraints.Constraint` Observational constraint(s) observer", "max def compute_constraint(self, times, observer, targets): # use get_body rather", "targets to tuple for hashing aakey = _make_cache_key(times, targets) if", "separation between Sun and target (inclusive). `None` indicates no limit.", "min=0.25, max=0.65, **kwargs): \"\"\" initialize a `~astroplan.constraints.MoonIlluminationConstraint` with defaults of", "event (e.g.~transiting exoplanets, eclipsing binaries). \"\"\" def __init__(self, periodic_event, min=None,", "``min_val``. (in some cases anything less than ``min_val`` should also", "binary = PeriodicEvent(epoch=Time('2017-01-01 02:00'), period=1*u.day) >>> constraint = PhaseConstraint(binary, min=0.4,", "pressure to zero for solar altitude calculations. This avoids errors", "between 2016-03-28 and 2016-03-30: >>> from astroplan import Observer >>>", "list or `~astroplan.constraints.Constraint` Observational constraint(s) observer : `~astroplan.Observer` The observer", ": `~astropy.table.Table` A Table containing the observability information for each", "library from abc import ABCMeta, abstractmethod import datetime import time", "> max_val rescaled[below] = less_than_min rescaled[above] = 0 return rescaled", "observer.pressure observer.pressure = 0 altaz = observer.altaz(times, targets, grid_times_targets=False) observer._altaz_cache[aakey]", "specified, determine whether constraints are met between test times in", "apply the constraints. Returns ------- constraint_result : 2D array of", "This can be useful if not all observing blocks are", "on which to apply the constraints. 
times : `~astropy.time.Time` The", "\"\"\" def __init__(self, min=None, max=None, boolean_constraint=True): if min is None:", "arguments appear in the order (max, min) in this initializer", "datetime.time(23, 59, 59) # If time limits occur on same", "mask = (self.min <= illumination) & moon_up_mask elif self.min is", "force_zero_pressure: observer_old_pressure = observer.pressure observer.pressure = 0 altaz = observer.altaz(times,", "time_range=time_range, time_grid_resolution=time_grid_resolution, grid_times_targets=True) for constraint in constraints] constraint_arr = np.logical_and.reduce(applied_constraints)", "indicates no limit. boolean_constraint : bool If True, the constraint", "greater_than_max=1): \"\"\" rescales an input array ``vals`` to be a", "targkey = tuple(targets.frame.data.lon.value.ravel()) + targets.shape else: # assume targets is", "are observable between 2016-03-28 and 2016-03-30: >>> from astroplan import", "\"\"\" def __init__(self, min=None, max=None, ephemeris=None): \"\"\" Parameters ---------- min", "self.min = min self.max = max self.ephemeris = ephemeris def", "straddle midnight: else: try: mask = np.array([(t.time() >= min_time) or", "equal 0 and those equal to ``min_val`` equal 1 Examples", ": float best value cared about (rescales to 1) less_than_min", "from numpy.lib.stride_tricks import as_strided # Package from .moon import moon_illumination", "self.min is None else self.min max_time = Time(\"2120-01-01T00:00:00\") if self.max", "< 0 moon_up_mask = moon_alt >= 0 illumination = cached_moon['illum']", "aakey not in observer._altaz_cache: try: if force_zero_pressure: observer_old_pressure = observer.pressure", "cached_moon = _get_moon_data(times, observer) moon_alt = cached_moon['altaz'].alt moon_down_mask = moon_alt", "(inclusive). `None` indicates no limit. max : `~astropy.time.Time` Latest time", "= self.min <= secz elif self.min is not None and", "is None and self.max is not None: mask = (self.max", "= np.logical_and.reduce(applied_constraints) colnames = ['target name', 'ever observable', 'always observable',", "not in observer._altaz_cache: try: if self.force_pressure_zero: observer_old_pressure = observer.pressure observer.pressure", "best value cared about (rescales to 1) less_than_min : 0", "same as targets.separation(moon) # the former calculates the separation in", ": `~astropy.units.Quantity` or `None` (optional) Minimum acceptable separation between moon", "return observer._altaz_cache[aakey] def _get_moon_data(times, observer, force_zero_pressure=False): \"\"\" Calculate moon altitude", "midnight: else: try: mask = np.array([(t.time() >= min_time) or (t.time()", "``observer``. 
Parameters ---------- constraints : list or `~astroplan.constraints.Constraint` Observational constraint(s)", "constraints] constraint_arr = np.logical_and.reduce(applied_constraints) else: times_ing = times_ingress_egress[:, 0] times_egr", "to 1) less_than_min : 0 or 1 what is returned", "times, targets to tuple for hashing aakey = _make_cache_key(times, targets)", "def is_observable(constraints, observer, targets, times=None, time_range=None, time_grid_resolution=0.5*u.hour): \"\"\" Determines if", "None: mask = ((self.min <= separation) & (separation <= self.max))", "if self.boolean_constraint: if self.min is None and self.max is not", "constrain observations on orbital phases between 0.4 and 0.6, >>>", "self.min) | (phase <= self.max)) return mask def is_always_observable(constraints, observer,", "= self.max >= separation elif self.max is None and self.min", "either a minimum or a \" \"maximum time.\") if self.min", "is_always_observable(constraints, observer, targets, times=None, time_range=None, time_grid_resolution=0.5*u.hour): \"\"\" A function to", "that January maps to 1, February maps to 2, etc.", "to apply the constraints. times : `~astropy.time.Time` The times to", "these times altaz = observer.altaz(times, get_sun(times)) altitude = altaz.alt #", "self.eclipsing_system = eclipsing_system def compute_constraint(self, times, observer=None, targets=None): mask =", "= moon_alt >= 0 illumination = cached_moon['illum'] if self.min is", "to store expensive calculations for a combination of ``targets`` and", "constraint_arr = np.logical_and.reduce(applied_constraints) months_observable = [] for target, observable in", "``max_val`` goes to zero. Parameters ---------- vals : array-like the", "limit. \"\"\" return cls(min, max, **kwargs) @classmethod def grey(cls, min=0.25,", "and ``times`` in a cache on an ``observer``` object. This", "the airmass (``max``) and not the lower limit. Parameters ----------", "observer : `~astroplan.Observer` the observaton location from which to apply", "if self.max is not None: if not isinstance(self.max, Time): raise", "to 1) greater_than_max : 0 or 1 what is returned", "separation elif self.max is None and self.min is not None:", "Lower and upper bounds on time sequence, with spacing ``time_resolution``.", "\"PhaseConstraint\", \"is_event_observable\"] _current_year = time.localtime().tm_year # needed for backward compatibility", "= moon_alt < 0 moon_up_mask = moon_alt >= 0 illumination", "finally: if force_zero_pressure: observer.pressure = observer_old_pressure return observer._altaz_cache[aakey] def _get_moon_data(times,", "{list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target or list of targets Returns -------", "civil twilights (-6 degrees). \"\"\" return cls(max_solar_altitude=-6*u.deg, **kwargs) @classmethod def", "targets) observer._meridian_transit_cache[aakey] = dict(times=meridian_transit_times) return observer._meridian_transit_cache[aakey] @abstractmethod class Constraint(object): \"\"\"", "prior to evaluating multiple constraints. 
if targets.isscalar: # ensure we", "ValueError(\"No max and/or min specified in \" \"GalacticLatitudeConstraint.\") return mask", "23:50 and 04:08 local Hawaiian time >>> constraint = LocalTimeConstraint(min=dt.time(23,50),", "0] times_egr = times_ingress_egress[:, 1] applied_constraints_ing = [constraint(observer, target, times=times_ing,", "unique integers representing each month that a target is observable,", "minimum or a maximum time.\") if self.min is not None:", "due to # complex nature of these objects. We make", "is below the horizon and the corrections for atmospheric refraction", "max : `~astropy.units.Quantity` or `None` (optional) Minimum acceptable Galactic latitude", "in primary eclipse. \"\"\" self.eclipsing_system = eclipsing_system def compute_constraint(self, times,", "0.4, 0.8, 1. ]) \"\"\" rescaled = (vals - min_val)", "the former calculates the separation in the frame of the", "exoplanets, eclipsing binaries). \"\"\" def __init__(self, periodic_event, min=None, max=None): \"\"\"", "min : `~datetime.time` Earliest local time (inclusive). `None` indicates no", "of time observable'] * 24*u.hour tab.meta['times'] = times.datetime tab.meta['observer'] =", "between Sun and target (inclusive). `None` indicates no limit. max", "arrays of the same shape and # broadcast these to", "(optional) Minimum acceptable Galactic latitude of target (inclusive). `None` indicates", "timezone from time objects, or from observer if self.min is", "use np.bool so shape queries don't cause problems mask =", "is None else self.max mask = np.logical_and(times > min_time, times", "February maps to 2, etc. \"\"\" # TODO: This method", "the real work of computing the constraint. Subclasses override this.", "frame tends to mishandle negative Parameters ---------- min : `~astropy.units.Quantity`", "calculations. applied_constraints = [constraint(observer, targets, times=times, grid_times_targets=True) for constraint in", "if self.min is not None: timezone = self.min.tzinfo elif self.max", "above = vals > max_val rescaled[below] = less_than_min rescaled[above] =", "always_obs, frac_obs]) if times is None and time_range is not", ": `~astroplan.Observer` the observaton location from which to apply the", "list of targets Returns ------- time_dict : dict Dictionary containing", "class Parameters ---------- observer : `~astroplan.Observer` the observation location from", "in secondary eclipse. \"\"\" self.eclipsing_system = eclipsing_system def compute_constraint(self, times,", "table import numpy as np from numpy.lib.stride_tricks import as_strided #", "target is ever observable in the time range given the", "a cache on an ``observer``` object. This routine will provide", "times.shape except BaseException: # must be scalar timekey = (times.jd,)", "indicates no limit. max : float or `None` (optional) Maximum", "times altaz = observer.altaz(times, get_sun(times)) altitude = altaz.alt # cache", "for ``observer``. Parameters ---------- constraints : list or `~astroplan.constraints.Constraint` Observational", "60) # doctest: +FLOAT_CMP array([ 0. , 0. , 0.2,", "times on which to test the constraint observer : `~astroplan.Observer`", "apply the constraints targets : sequence of `~astroplan.Target` The targets", "here, since # it returns the Sun's coordinates in an", "object), and ``'constraints'`` (containing the supplied ``constraints``). 
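Because `compute_constraint` is the only method a subclass must provide, a user-defined constraint takes little code. The sketch below is illustrative and not part of the module: the class name, the reference target (Vega) and its approximate coordinates are assumptions made for the example.

    import astropy.units as u
    from astropy.coordinates import SkyCoord
    from astroplan import Constraint

    class VegaSeparationConstraint(Constraint):
        """Hypothetical constraint: keep targets away from Vega."""

        def __init__(self, min=10 * u.deg):
            self.min = min

        def compute_constraint(self, times, observer, targets):
            # Approximate ICRS coordinates of Vega; precision is
            # irrelevant for this illustration.
            vega = SkyCoord(ra=279.23 * u.deg, dec=38.78 * u.deg)
            # Boolean mask over the broadcast targets/times grid.
            return vega.separation(targets) > self.min

An instance is callable (`VegaSeparationConstraint()(observer, targets, times=times)`) and can be passed in a constraints list to `is_observable` or `observability_table` like any built-in constraint.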
\"\"\" if not", "= [constraint(observer, target, times=times_ing, grid_times_targets=True) for constraint in constraints] applied_constraints_egr", "np.all(constraint_arr, axis=1) frac_obs = np.sum(constraint_arr, axis=1) / constraint_arr.shape[1] tab =", "computations, (2) 'altaz' contains the corresponding alt/az coordinates at those", "constraint = TimeConstraint(t1,t2) \"\"\" self.min = min self.max = max", "(phase <= self.max)) return mask def is_always_observable(constraints, observer, targets, times=None,", "determine whether ``targets`` are always observable throughout ``time_range`` given constraints", "constraints in the ``constraints_list`` for a particular ``observer``. Parameters ----------", "object. Parameters ---------- times : `~astropy.time.Time` Array of times on", "which to test the constraint observer : `~astroplan.Observer` The observer", ": {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target or list of targets time_range", "be scalar timekey = (times.jd,) # make hashable thing from", "operations are relatively slow # but there is potential for", "the Earth's moon and some targets. \"\"\" def __init__(self, min=None,", "be present if the ``time_range`` is given as a scalar.", ": `~astropy.time.Time` (optional) Array of mid-event times on which to", "score (between zero and one), where the ``max_val`` goes to", "**kwargs) @classmethod def bright(cls, min=0.65, max=None, **kwargs): \"\"\" initialize a", "times : `~astropy.time.Time` (optional) Array of mid-event times on which", "appear in the order (max, min) in this initializer to", "class AtNightConstraint(Constraint): \"\"\" Constrain the Sun to be below ``horizon``.", "shape as would result from # broadcasting times and targets", "events, with shape (``N``, 2). Returns ------- event_observable : `~numpy.ndarray`", "from astropy.time import Time >>> subaru = Observer.at_site(\"Subaru\") >>> t1", "specific observing block. This can be useful if not all", "\"\"\" rescaled = (vals - min_val) / (max_val - min_val)", "Latest local time (inclusive). `None` indicates no limit. Examples --------", "<= max_time) # If time boundaries straddle midnight: else: try:", "grid_times_targets=True) for constraint in constraints] constraint_arr = np.logical_and.reduce(applied_constraints) colnames =", "some cases it should return zero) Returns ------- array of", "targets): # first is the moon up? cached_moon = _get_moon_data(times,", "pairs. (1) 'times' contains the times for the alt/az computations,", "[constraint(observer, targets, times=times, time_range=time_range, time_grid_resolution=time_grid_resolution, grid_times_targets=True) for constraint in constraints]", "the constraint observer : `~astroplan.Observer` The observer who has constraints", "second. \"\"\" if times is None and time_range is not", ": `~astropy.units.Quantity` or `None` (optional) Minimum acceptable Galactic latitude of", "if times is not None: applied_constraints = [constraint(observer, target, times=times,", "def compute_constraint(self, times, observer=None, targets=None): phase = self.periodic_event.phase(times) mask =", "secondary eclipse. \"\"\" def __init__(self, eclipsing_system): \"\"\" Parameters ---------- eclipsing_system", ": float best value cared about (rescales to 1) greater_than_max", "Dictionary containing two key-value pairs. 
(1) 'times' contains the times", "= tuple(times.jd) + times.shape except BaseException: # must be scalar", "not None: timezone = self.max.tzinfo if timezone is None: timezone", "astroplan import Observer >>> from astroplan.constraints import LocalTimeConstraint >>> import", "which to test the constraints times_ingress_egress : `~astropy.time.Time` (optional) Array", "None.\") else: mx = self.max mi = 1 if self.min", "None else 0.0 self.max = max if max is not", "------- altaz_dict : dict Dictionary containing two key-value pairs. (1)", "40, 45, 55, 70]) >>> max_best_rescale(altitudes, 35, 60) # doctest:", "a minimum or a maximum time.\") if self.min is not", "max : float or `None` Maximum airmass of the target.", "`~astropy.coordinates.SkyCoord` Target or list of targets. Returns ------- cache_key :", "acceptable separation between Sun and target (inclusive). `None` indicates no", "Calculate moon altitude az and illumination for an array of", "None: # broadcasting times v targets is slow due to", "compute_constraint(self, times, observer, targets): timezone = None # get timezone", "= None # get timezone from time objects, or from", "**kwargs): \"\"\" Consider nighttime as time between civil twilights (-6", "0.65 and no maximum Parameters ---------- min : float or", "supply either a minimum or a \" \"maximum time.\") if", "the target. `None` indicates no limit. boolean_contstraint : bool Examples", "self.eclipsing_system.in_secondary_eclipse(times) return mask class PhaseConstraint(Constraint): \"\"\" Constrain observations to times", "= self.min <= alt uppermask = alt <= self.max return", "during secondary eclipse. \"\"\" def __init__(self, eclipsing_system): \"\"\" Parameters ----------", "this combination of ``times`` and ``targets``. Often, we wish to", "[0, 1). To constrain observations on orbital phases between 0.6", "``constraints_list`` for ``observer``. Parameters ---------- constraints : list or `~astroplan.constraints.Constraint`", "the constraint for this class Parameters ---------- observer : `~astroplan.Observer`", "that time. time_grid_resolution : `~astropy.units.Quantity` (optional) If ``time_range`` is specified,", "0.6, >>> from astroplan import PeriodicEvent >>> from astropy.time import", "moon = get_moon(times, ephemeris=self.ephemeris) # note to future editors -", "be rescaled to be between 0 and 1 min_val :", "\"months_observable\", \"max_best_rescale\", \"min_best_rescale\", \"PhaseConstraint\", \"is_event_observable\"] _current_year = time.localtime().tm_year # needed", "times for ``N`` events, with shape (``N``, 2). Returns -------", "lot by dropping to the trigonometric # altitude calculations. applied_constraints", "def twilight_nautical(cls, **kwargs): \"\"\" Consider nighttime as time between nautical", "np.array(moon_illumination(times)) observer._moon_cache[aakey] = dict(times=times, illum=illumination, altaz=altaz) finally: if force_zero_pressure: observer.pressure", "spacing grid_times_targets : bool if True, grids the constraint result", "max_solar_altitude self.force_pressure_zero = force_pressure_zero @classmethod def twilight_civil(cls, **kwargs): \"\"\" Consider", ": {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target times : `~astropy.time.Time` (optional) Array", "coord targets = SkyCoord(np.tile(targets, 1))[:, np.newaxis] else: targets = targets[...,", "`~datetime.time` Earliest local time (inclusive). `None` indicates no limit. 
max", "to associate an acceptable time range with a specific observing", "`None` Maximum airmass of the target. `None` indicates no limit.", "illumination) | moon_down_mask elif self.max is None and self.min is", "``time_resolution``. This will be passed as the first argument into", "= max def compute_constraint(self, times, observer, targets): # use get_body", "Constrain the altitude of the target. .. note:: This can", "no limit. min : float or `None` Minimum airmass of", "= Time(\"2016-03-30T12:00:00\") >>> constraint = TimeConstraint(t1,t2) \"\"\" self.min = min", "some cases anything higher than ``max_val`` should also return one,", "targets.isscalar: # ensure we have a (1, 1) shape coord", "datetime import time import warnings # Third-party from astropy.time import", "thing from targets coords try: if hasattr(targets, 'frame'): # treat", ": `~astropy.time.Time` (optional) Lower and upper bounds on time sequence,", "Parameters ---------- max : float or `None` Maximum airmass of", "<= moon_separation) & (moon_separation <= self.max)) else: raise ValueError(\"No max", "`~astropy.units.Quantity` or `None` Maximum altitude of the target (inclusive). `None`", "Parameters ---------- vals : array-like the values that need to", "1 Examples -------- rescale an array of altitudes to be", "with grid spacing ``time_resolution`` for ``observer``. Cache the result on", "False for outside). If False, the constraint returns a float", "times = time_grid_from_range(time_range, time_resolution=time_grid_resolution) if is_24hr_table: tab['time observable'] = tab['fraction", "from observer if self.min is not None: timezone = self.min.tzinfo", "Licensed under a 3-clause BSD style license - see LICENSE.rst", "for the computations, (2) 'altaz' contains the corresponding alt/az coordinates", "observer._meridian_transit_cache[aakey] = dict(times=meridian_transit_times) return observer._meridian_transit_cache[aakey] @abstractmethod class Constraint(object): \"\"\" Abstract", "mask class SunSeparationConstraint(Constraint): \"\"\" Constrain the distance between the Sun", "time >>> constraint = LocalTimeConstraint(min=dt.time(23,50), max=dt.time(4,8)) \"\"\" self.min = min", "be specified as \" \"astropy.time.Time objects.\") if self.max is not", "the ``targets``. The table contains four columns with information about", "np.array([1, 1.5, 2, 3, 0]) >>> min_best_rescale(airmasses, 1, 2.25, less_than_min", "+FLOAT_CMP array([ 1. , 0.6, 0.2, 0. , 0. ])", "Parameters ---------- times : `~astropy.time.Time` Array of times on which", "min=None, max=None): \"\"\" Parameters ---------- min : `~datetime.time` Earliest local", "between 0 and 1, with the best (60) going to", "three key-value pairs. (1) 'times' contains the times for the", "in a dictionary. Parameters ---------- times : `~astropy.time.Time` Array of", "is not None: mask = ((self.min <= moon_separation) & (moon_separation", "targets.separation(moon) # the former calculates the separation in the frame", "time_range is not None: times = time_grid_from_range(time_range, time_resolution=time_grid_resolution) if is_24hr_table:", "observation location from which to apply the constraints targets :", ": `~astroplan.periodic.EclipsingSystem` System which must be in primary eclipse. \"\"\"", "mask class PrimaryEclipseConstraint(Constraint): \"\"\" Constrain observations to times during primary", "a periodic event (e.g.~transiting exoplanets, eclipsing binaries). \"\"\" def __init__(self,", "times on which to test the constraint. 
targets : `~astropy.coordinates.SkyCoord`", "airmass=2:: AirmassConstraint(2) \"\"\" def __init__(self, max=None, min=1, boolean_constraint=True): self.min =", "return mask class PhaseConstraint(Constraint): \"\"\" Constrain observations to times in", "target : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target times : `~astropy.time.Time` (optional)", "table.Table(names=colnames, data=[target_names, ever_obs, always_obs, frac_obs]) if times is None and", "[constraint(observer, target, times=times_ing, grid_times_targets=True) for constraint in constraints] applied_constraints_egr =", "``max_val`` goes to one, and the ``min_val`` goes to zero.", "cared about (rescales to 1) greater_than_max : 0 or 1", "get_moon, Galactic, SkyCoord from astropy import table import numpy as", "the first index and times along the second. \"\"\" if", "\"\"\" Constrain observations to times during primary eclipse. \"\"\" def", "PhaseConstraint must be within' ' the interval [0, 1).') self.min", "= np.logical_and.reduce(applied_constraints) return np.any(constraint_arr, axis=1) def is_event_observable(constraints, observer, target, times=None,", "value (rescales to 0) max_val : float best value cared", "- min_val) / (max_val - min_val) below = vals <", "((self.min <= separation) & (separation <= self.max)) else: raise ValueError(\"No", "goes to one, and the ``max_val`` goes to zero. Parameters", "\"\"\" Constrain the distance between the Earth's moon and some", "``targets`` and ``observer``. Cache the result on the ``observer`` object.", ">>> from astroplan.constraints import LocalTimeConstraint >>> import datetime as dt", "the distance between the Sun and some targets. \"\"\" def", "2016-03-30: >>> from astroplan import Observer >>> from astropy.time import", "illumination for those times. \"\"\" if not hasattr(observer, '_moon_cache'): observer._moon_cache", "observer, targets, force_zero_pressure=False): \"\"\" Calculate alt/az for ``target`` at times", "and time_range is not None: times = time_grid_from_range(time_range, time_resolution=time_grid_resolution) if", "force_pressure_zero : bool (optional) Force the pressure to zero for", "The observer who has constraints ``constraints``. force_zero_pressure : bool Forcefully", "re-shapes the coords themselves # prior to evaluating multiple constraints.", "this initializer to support the common case for users who", "**kwargs) def compute_constraint(self, times, observer, targets): # first is the", "specified as datetime.time objects.\") if self.max is not None: if", "raise ValueError('The minimum of the PhaseConstraint must be within' '", "**kwargs): \"\"\" Consider nighttime as time between nautical twilights (-12", "pressure. Returns ------- altaz_dict : dict Dictionary containing two key-value", "PhaseConstraint(Constraint): \"\"\" Constrain observations to times in some range of", ": `~astropy.time.Time` The times to compute the constraint observer :", "if not hasattr(observer, '_altaz_cache'): observer._altaz_cache = {} # convert times,", "trigonometric # altitude calculations. 
if not hasattr(constraints, '__len__'): constraints =", "not hasattr(constraints, '__len__'): constraints = [constraints] is_24hr_table = False if", "will provide an appropriate, hashable, key to store these calculations", "under a 3-clause BSD style license - see LICENSE.rst \"\"\"", "constraints] constraint_arr = np.logical_and(np.logical_and.reduce(applied_constraints_ing), np.logical_and.reduce(applied_constraints_egr)) return constraint_arr def months_observable(constraints, observer,", "get sun here, since # it returns the Sun's coordinates", "than 2\", i.e. at a higher altitude than airmass=2:: AirmassConstraint(2)", "specified in \" \"MoonSeparationConstraint.\") return mask class LocalTimeConstraint(Constraint): \"\"\" Constrain", "in \" \"SunSeparationConstraint.\") return mask class MoonSeparationConstraint(Constraint): \"\"\" Constrain the", "ever_obs, always_obs, frac_obs]) if times is None and time_range is", "constraints] constraint_arr = np.logical_and.reduce(applied_constraints) return np.any(constraint_arr, axis=1) def is_event_observable(constraints, observer,", "from __future__ import (absolute_import, division, print_function, unicode_literals) # Standard library", "the constraint. observer : `~astroplan.Observer` The observer who has constraints", "# 'get_sun' returns ICRS coords. sun = get_body('sun', times, location=observer.location)", "containing a key-value pair. 'times' contains the meridian_transit times. \"\"\"", "= False if hasattr(time_range, 'isscalar') and time_range.isscalar: time_range = (time_range-12*u.hour,", "\"MoonSeparationConstraint.\") return mask class LocalTimeConstraint(Constraint): \"\"\" Constrain the observable hours.", "except BaseException: # must be scalar timekey = (times.jd,) #", "return mask def is_always_observable(constraints, observer, targets, times=None, time_range=None, time_grid_resolution=0.5*u.hour): \"\"\"", "constraints = [constraints] if times is not None: applied_constraints =", "calculations in a dictionary. Parameters ---------- times : `~astropy.time.Time` Array", "editors - the order matters here # moon.separation(targets) is NOT", "mid-event times on which to test the constraints times_ingress_egress :", "ephemeris=None): \"\"\" Parameters ---------- min : float or `None` (optional)", "bound times between 23:50 and 04:08 local Hawaiian time >>>", "None else self.min # values below 1 should be disregarded", "_get_solar_altitudes(self, times, observer, targets): if not hasattr(observer, '_altaz_cache'): observer._altaz_cache =", "if not hasattr(constraints, '__len__'): constraints = [constraints] applied_constraints = [constraint(observer,", "Consider nighttime as time between astronomical twilights (-18 degrees). \"\"\"", "# broadcast these to find the correct shape shp1, shp2", "of targets. Returns ------- cache_key : tuple A hashable tuple", "If ``time_range`` is specified, determine whether constraints are met between", "a `~astroplan.constraints.MoonIlluminationConstraint` with defaults of a minimum of 0.65 and", "as the first argument into `~astroplan.time_grid_from_range`. If a single (scalar)", "sun.separation(targets) if self.min is None and self.max is not None:", "about observability for all the ``targets`` over the requested ``time_range``,", "key-value pairs. (1) 'times' contains the times for the alt/az", "ephemeris : str, optional Ephemeris to use. 
If not given,", ": float worst acceptable value (rescales to 0) max_val :", "constraint in constraints] applied_constraints_egr = [constraint(observer, target, times=times_egr, grid_times_targets=True) for", "and self.max is not None: mask = self.max >= solar_separation", "Third-party from astropy.time import Time import astropy.units as u from", "self.max is not None: mask = ((self.min <= illumination) &", "that need to be rescaled to be between 0 and", "min_best_rescale >>> import numpy as np >>> airmasses = np.array([1,", "datetime as dt >>> subaru = Observer.at_site(\"Subaru\", timezone=\"US/Hawaii\") >>> #", "if True, grids the constraint result with targets along the", "time_grid_from_range(time_range, time_resolution=time_grid_resolution) if grid_times_targets: targets = get_skycoord(targets) # TODO: these", "the ``observer`` object. Parameters ---------- times : `~astropy.time.Time` Array of", "a (1, 1) shape coord targets = SkyCoord(np.tile(targets, 1))[:, np.newaxis]", "try to constrain negative altitudes, as the `~astropy.coordinates.AltAz` frame tends", "max : float or `None` (optional) Maximum acceptable fractional illumination", "\"\"\" initialize a `~astroplan.constraints.MoonIlluminationConstraint` with defaults of a minimum of", "second number: >>> constraint = PhaseConstraint(binary, min=0.6, max=0.2) \"\"\" self.periodic_event", "\"\"\" self.periodic_event = periodic_event if (min < 0) or (min", "= self.max >= moon_separation elif self.max is None and self.min", "gridding and re-shapes the coords themselves # prior to evaluating", "as_strided(x, shape=shp2, strides=[0] * len(shp2)) output_shape = np.broadcast(a, b).shape if", "# Should be implemented on each subclass of Constraint raise", "targets) alt = cached_altaz['altaz'].alt if self.boolean_constraint: lowermask = self.min <=", "is None and self.min is not None: mask = (self.min", "method could be sped up a lot by dropping to", "np.logical_and.reduce(applied_constraints) months_observable = [] for target, observable in zip(targets, constraint_arr):", "= less_than_min rescaled[above] = 0 return rescaled def max_best_rescale(vals, min_val,", "TODO: This method could be sped up a lot by", "observer, targets): timezone = None # get timezone from time", "constraint. WHAT HAPPENS WHEN BOTH TIMES AND TIME_RANGE ARE SET?", "<= self.max elif self.max is None and self.min is not", "self.min # values below 1 should be disregarded return min_best_rescale(secz,", "len(shp2)) output_shape = np.broadcast(a, b).shape if output_shape != np.array(result).shape: result", "Default is 0.5 hours. Returns ------- observable_months : list List", "time.localtime().tm_year # needed for backward compatibility _current_year_time_range = Time( #", "time_range is not None: times = time_grid_from_range(time_range, time_resolution=time_grid_resolution) if grid_times_targets:", "Maximum altitude of the target (inclusive). 
`None` indicates no limit.", "= min if max is None: self.max = 90*u.deg else:", "four columns with information about the target and it's observability:", "/ constraint_arr.shape[1] tab = table.Table(names=colnames, data=[target_names, ever_obs, always_obs, frac_obs]) if", "constraints return tab def min_best_rescale(vals, min_val, max_val, less_than_min=1): \"\"\" rescales", "from times try: timekey = tuple(times.jd) + times.shape except BaseException:", "slow # but there is potential for huge speedup if", "def max_best_rescale(vals, min_val, max_val, greater_than_max=1): \"\"\" rescales an input array", "altitude of the sun below which it is considered to", "If time limits occur on same day: if min_time <", "axis=1) def is_observable(constraints, observer, targets, times=None, time_range=None, time_grid_resolution=0.5*u.hour): \"\"\" Determines", "used in calls to `is_observable` or `is_always_observable`. \"\"\" def __init__(self,", "observer.altaz(times, get_sun(times)) altitude = altaz.alt # cache the altitude observer._altaz_cache[aakey]", "timekey + targkey def _get_altaz(times, observer, targets, force_zero_pressure=False): \"\"\" Calculate", "cached_altaz['altaz'].secz.value if self.boolean_constraint: if self.min is None and self.max is", "against each other if targets is not None: # broadcasting", "get_sun(times)) altitude = altaz.alt # cache the altitude observer._altaz_cache[aakey] =", "mask = np.array([(t.time() >= min_time) or (t.time() <= max_time) for", "``target`` is observable at each time in ``times``, given constraints", ">>> from astropy.time import Time >>> import astropy.units as u", "TypeError(\"Time limits must be specified as \" \"astropy.time.Time objects.\") def", "must be specified as \" \"astropy.time.Time objects.\") def compute_constraint(self, times,", "(inclusive) on interval [0, 1). Default is zero. max :", "\"Constraint\", \"TimeConstraint\", \"observability_table\", \"months_observable\", \"max_best_rescale\", \"min_best_rescale\", \"PhaseConstraint\", \"is_event_observable\"] _current_year =", "BaseException: targkey = (targets.frame.data.lon,) return timekey + targkey def _get_altaz(times,", "no limit. max : `~astropy.units.Quantity` or `None` (optional) Minimum acceptable", "targets[..., np.newaxis] times, targets = observer._preprocess_inputs(times, targets, grid_times_targets=False) result =", "raise ValueError(\"No max and/or min specified in \" \"GalacticLatitudeConstraint.\") return", "None: if not isinstance(self.min, datetime.time): raise TypeError(\"Time limits must be", "return min_best_rescale(secz, mi, mx, less_than_min=0) class AtNightConstraint(Constraint): \"\"\" Constrain the", "Returns ------- event_observable : `~numpy.ndarray` Array of booleans of same", "return np.all(constraint_arr, axis=1) def is_observable(constraints, observer, targets, times=None, time_range=None, time_grid_resolution=0.5*u.hour):", "aakey = _make_cache_key(times, targets) if aakey not in observer._altaz_cache: try:", "\"is_always_observable\", \"time_grid_from_range\", \"GalacticLatitudeConstraint\", \"SunSeparationConstraint\", \"MoonSeparationConstraint\", \"MoonIlluminationConstraint\", \"LocalTimeConstraint\", \"PrimaryEclipseConstraint\", \"SecondaryEclipseConstraint\", \"Constraint\",", "targets on which to apply the constraints. times : `~astropy.time.Time`", "(the `~astroplan.Observer` object), and ``'constraints'`` (containing the supplied ``constraints``). \"\"\"", "the zenith angle. .. 
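A typical way to combine these classes is a plain list. The specific limits below (20 and 85 degrees altitude, 30 degrees Moon separation) are arbitrary example values, not defaults from the module.

    import astropy.units as u
    from astroplan import (AltitudeConstraint, AirmassConstraint,
                           AtNightConstraint, MoonSeparationConstraint)

    constraints = [
        AltitudeConstraint(min=20 * u.deg, max=85 * u.deg),
        AirmassConstraint(2),                      # airmass better than 2
        AtNightConstraint.twilight_civil(),        # Sun below -6 degrees
        MoonSeparationConstraint(min=30 * u.deg),  # >= 30 deg from the Moon
    ]

Such a list is what the observability functions described below accept as their `constraints` argument; a single constraint instance is also accepted.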
`LocalTimeConstraint(min=None, max=None)` constrains the observable hours by local time. The bounds are `datetime.time` objects (a TypeError is raised otherwise) and at least one must be supplied; the timezone is taken from the time objects or, failing that, from the observer, and ranges that straddle midnight are handled. The docstring example bounds times between 23:50 and 04:08 local Hawaiian time for `Observer.at_site("Subaru", timezone="US/Hawaii")` with `LocalTimeConstraint(min=dt.time(23, 50), max=dt.time(4, 8))`.

`TimeConstraint(min=None, max=None)` constrains the observing time to lie between two `astropy.time.Time` limits; when a bound is omitted it falls back to a very early (1950-01-01) or very late (2120-01-01) time. A typical use is to associate an acceptable time range with a specific observing block, which is useful when not all observing blocks are valid over the time limits used in calls to `is_observable` or `is_always_observable`. The docstring example uses `TimeConstraint(Time("2016-03-28T12:00:00"), Time("2016-03-30T12:00:00"))` for targets observable from Subaru between 2016-03-28 and 2016-03-30.

`PrimaryEclipseConstraint(eclipsing_system)` and `SecondaryEclipseConstraint(eclipsing_system)` restrict observations to times during primary or secondary eclipse of an `EclipsingSystem`, returning `eclipsing_system.in_primary_eclipse(times)` or `in_secondary_eclipse(times)` respectively.

`PhaseConstraint(periodic_event, min=None, max=None)` restricts observations to a range of orbital phases of a `PeriodicEvent` (an eclipsing or non-eclipsing binary, a transiting exoplanet, and so on). `min` and `max` must lie in the interval [0, 1); `min` defaults to zero and `max` to one. Wrap-around ranges are supported: to constrain phases between 0.6 and 1.2, subtract one from the second number and pass `min=0.6, max=0.2`.
On top of these constraint classes the module provides vectorised observability checks. `is_always_observable(constraints, observer, targets, times=None, time_range=None, time_grid_resolution=0.5*u.hour)` reports, per target, whether it is observable throughout the whole time range given the constraints; `is_observable(...)`, with the same signature, reports whether each target is ever observable in the time range. Both accept either explicit `times` or a `time_range` that is expanded into a grid with spacing `time_grid_resolution`, apply each constraint with `grid_times_targets=True`, combine the results with `numpy.logical_and.reduce`, and collapse over the time axis with `all` or `any` respectively.

`is_event_observable(constraints, observer, target, times=None, times_ingress_egress=None)` determines whether a single target is observable at each mid-event time or, when `times_ingress_egress` (shape (N, 2)) is given, at both the ingress and egress times of N events; it returns a boolean array of the same length as the supplied times.

`months_observable(constraints, observer, targets, ...)` returns, for each target, the set of months in which it is observable over the requested time range (which, for backward compatibility, defaults to the current calendar year via `_current_year_time_range`). The integers are 1-based, so January maps to 1, February maps to 2, and so on; a TODO in the source notes that the method could be sped up considerably by dropping to trigonometric altitude calculations.

`observability_table(constraints, observer, targets, times=None, time_range=None, time_grid_resolution=0.5*u.hour)` creates an `astropy.table.Table` with one row per target and columns 'target name', 'ever observable', 'always observable' and 'fraction of time observable'. If `time_range` is given as a scalar time, the table is computed for a 24-hour period centred on that time and an additional 'time observable' column (the fraction multiplied by 24 hours) is included. The table metadata carries 'times' (an array of all the times checked), 'observer' (the `Observer` object) and 'constraints' (the supplied constraints).
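A minimal end-to-end sketch combining the pieces above; the observer and date range come from the docstring examples, while the target names (Vega, Deneb) are arbitrary choices and `FixedTarget.from_name` requires a network connection to resolve them.

    import astropy.units as u
    from astropy.time import Time
    from astroplan import (Observer, FixedTarget, AirmassConstraint,
                           AtNightConstraint, is_observable,
                           is_always_observable, observability_table)

    subaru = Observer.at_site("Subaru")
    targets = [FixedTarget.from_name("Vega"), FixedTarget.from_name("Deneb")]
    constraints = [AirmassConstraint(2), AtNightConstraint.twilight_civil()]
    time_range = Time(["2016-03-28T12:00:00", "2016-03-30T12:00:00"])

    ever = is_observable(constraints, subaru, targets, time_range=time_range)
    always = is_always_observable(constraints, subaru, targets,
                                  time_range=time_range)
    tab = observability_table(constraints, subaru, targets,
                              time_range=time_range)
    print(tab)  # one row per target with the observability columns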
Finally, two helpers convert measurements into scores for the non-boolean constraint modes. `min_best_rescale(vals, min_val, max_val, less_than_min=1)` rescales an input array to values between 0 and 1, where `min_val` (the best value) maps to 1 and `max_val` (the worst acceptable value) maps to 0; values below `min_val` are assigned `less_than_min` and values above `max_val` are assigned 0. `max_best_rescale(vals, min_val, max_val, greater_than_max=1)` is the mirror image: `max_val` maps to 1 and `min_val` to 0, values below `min_val` score 0 and values above `max_val` are assigned `greater_than_max`. The docstring examples rescale airmasses between the best value 1 and the worst acceptable 2.25 (everything outside that range scoring 0) and altitudes so that the score ramps from 0 at 35 degrees to 1 at 60 degrees and above.
Returns ------- moon_dict : dict Dictionary containing three key-value", "1 and worst (35) going to 0. For values outside", "bool Examples -------- To create a constraint that requires the", "hashable, key to store these calculations in a dictionary. Parameters", "astropy.time import Time import astropy.units as u from astropy.coordinates import", "to test the constraint observer : `~astroplan.Observer` The observer who", "array of all the times), ``'observer'`` (the `~astroplan.Observer` object), and", "min=0.6, max=0.2) \"\"\" self.periodic_event = periodic_event if (min < 0)", "force_zero_pressure: observer.pressure = observer_old_pressure return observer._moon_cache[aakey] def _get_meridian_transit_times(times, observer, targets):", "If not given, use the one set with `~astropy.coordinates.solar_system_ephemeris` (which", "the observations to targets that are observable between 23:50 and", "self.max is not None: mask = secz <= self.max elif", "(optional) Minimum acceptable fractional illumination (inclusive). `None` indicates no limit.", "``max_val`` should also return one, in some cases it should", "self.max = max self.ephemeris = ephemeris def compute_constraint(self, times, observer,", "time to be within certain time limits. An example use", "to current year (localtime) time_grid_resolution : `~astropy.units.Quantity` (optional) If ``time_range``", "the times), ``'observer'`` (the `~astroplan.Observer` object), and ``'constraints'`` (containing the", "= self.min.tzinfo elif self.max is not None: timezone = self.max.tzinfo", "import moon_illumination from .utils import time_grid_from_range from .target import get_skycoord", "max=0.65, **kwargs): \"\"\" initialize a `~astroplan.constraints.MoonIlluminationConstraint` with defaults of a", "---------- times : `~astropy.time.Time` Array of times on which to", "ABCMeta def __call__(self, observer, targets, times=None, time_range=None, time_grid_resolution=0.5*u.hour, grid_times_targets=False): \"\"\"", "day: if min_time < max_time: try: mask = np.array([min_time <=", "simple numpy arrays of the same shape and # broadcast", "AirmassConstraint(2) \"\"\" def __init__(self, max=None, min=1, boolean_constraint=True): self.min = min", "`~astropy.units.Quantity` or `None` Minimum altitude of the target (inclusive). `None`", "'_altaz_cache'): observer._altaz_cache = {} aakey = _make_cache_key(times, 'sun') if aakey", "we rely on broadcasting the shapes together using standard numpy", "calculations. if not hasattr(constraints, '__len__'): constraints = [constraints] times =", "if aakey not in observer._altaz_cache: try: if force_zero_pressure: observer_old_pressure =", "who has constraints ``constraints`` targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target", "2D array of float or bool The constraints, with targets", "= (self.max >= illumination) | moon_down_mask elif self.max is None", "Returns ------- observability_table : `~astropy.table.Table` A Table containing the observability", "time_range=_current_year_time_range, time_grid_resolution=0.5*u.hour): \"\"\" Determines which month the specified ``targets`` are", "in observer._altaz_cache: try: if self.force_pressure_zero: observer_old_pressure = observer.pressure observer.pressure =", "inclusive rescaled so that ``vals`` equal to ``min_val`` equal 0", "less_than_min = 0) # doctest: +FLOAT_CMP array([ 1. , 0.6,", "altitude of the target (inclusive). `None` indicates no limit. boolean_constraint", "the max. 
\"\"\" def __init__(self, min=None, max=None, boolean_constraint=True): if min", "Time( # needed for backward compatibility [str(_current_year) + '-01-01', str(_current_year)", "for t in times[observable]]) months_observable.append(s) return months_observable def observability_table(constraints, observer,", "which to test the constraint. observer : `~astroplan.Observer` The observer", "`~astropy.time.Time` The times to compute the constraint observer : `~astroplan.Observer`", "\"\"\" self.min = min self.max = max def compute_constraint(self, times,", "fractional illumination (inclusive). `None` indicates no limit. ephemeris : str,", "object. This routine will provide an appropriate, hashable, key to", "must at least supply either a minimum or a maximum", "about (rescales to 1) less_than_min : 0 or 1 what", "of mid-event times on which to test the constraints times_ingress_egress", "the interval [0, 1).') self.min = min if min is", "between the two times in ``time_range`` with grid spacing ``time_resolution``", ">>> from astroplan import PeriodicEvent >>> from astropy.time import Time", "targets): with warnings.catch_warnings(): warnings.simplefilter('ignore') min_time = Time(\"1950-01-01T00:00:00\") if self.min is", "Constrain the Sun to be below ``horizon``. \"\"\" @u.quantity_input(horizon=u.deg) def", "`~astroplan.FixedTarget`} Target or list of targets Returns ------- time_dict :", "if not hasattr(observer, '_altaz_cache'): observer._altaz_cache = {} aakey = _make_cache_key(times,", "None # get timezone from time objects, or from observer", "(optional) If ``time_range`` is specified, determine whether constraints are met", "observer who has constraints ``constraints`` target : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`}", "and that is what we want. moon_separation = moon.separation(targets) if", "return mask class TimeConstraint(Constraint): \"\"\"Constrain the observing time to be", "of time observable'``. The column ``'time observable'`` will also be", "observer._altaz_cache[aakey] def _get_moon_data(times, observer, force_zero_pressure=False): \"\"\" Calculate moon altitude az", "and constraints to determine which targets are observable for an", "max_val, less_than_min=1): \"\"\" rescales an input array ``vals`` to be", "compute_constraint(self, times, observer, targets): # use get_body rather than get", "'times' contains the times for the alt/az computations, (2) 'altaz'", "All values outside the range should return 0. >>> from", "``constraints`` for a particular ``observer``. Parameters ---------- constraints : list", "best value cared about (rescales to 1) greater_than_max : 0", "else: raise ValueError(\"No max and/or min specified in \" \"AirmassConstraint.\")", "``target`` at times linearly spaced between the two times in", "\"\"\" self.max_solar_altitude = max_solar_altitude self.force_pressure_zero = force_pressure_zero @classmethod def twilight_civil(cls,", "2016-03-28 and 2016-03-30: >>> from astroplan import Observer >>> from", "mask = (self.max >= illumination) | moon_down_mask elif self.max is", "' the interval [0, 1).') self.min = min if min", "self.max is not None: if not isinstance(self.max, Time): raise TypeError(\"Time", "maximum Parameters ---------- min : float or `None` (optional) Minimum", "The observer who has constraints ``constraints`` targets : {list, `~astropy.coordinates.SkyCoord`,", "target (inclusive). `None` indicates no limit. boolean_constraint : bool If", ": `~astropy.time.Time` Earliest time (inclusive). 
`None` indicates no limit. max", "= self.max mi = 1 if self.min is None else", "ever_observable : list List of booleans of same length as", "alt <= self.max return lowermask & uppermask else: return max_best_rescale(alt,", "class AirmassConstraint(AltitudeConstraint): \"\"\" Constrain the airmass of a target. In", "= observer_old_pressure return observer._altaz_cache[aakey] def _get_moon_data(times, observer, force_zero_pressure=False): \"\"\" Calculate", "(self.min <= illumination) & moon_up_mask elif self.min is not None", "needed for backward compatibility _current_year_time_range = Time( # needed for", "horizon and the corrections for atmospheric refraction return nonsense values.", "time sequence, with spacing ``time_resolution``. This will be passed as", "Time(\"2120-01-01T00:00:00\") if self.max is None else self.max mask = np.logical_and(times", "max : float (optional) Maximum phase (inclusive) on interval [0,", "if self.min is None else self.min # values below 1", "limit. max : `~datetime.time` Latest local time (inclusive). `None` indicates", "a cache key \"\"\" # make a tuple from times", "the longitude # attribute of the frame data should be", "= PhaseConstraint(binary, min=0.6, max=0.2) \"\"\" self.periodic_event = periodic_event if (min", "in constraints] constraint_arr = np.logical_and(np.logical_and.reduce(applied_constraints_ing), np.logical_and.reduce(applied_constraints_egr)) return constraint_arr def months_observable(constraints,", "# prior to evaluating multiple constraints. if targets.isscalar: # ensure", "Earliest time (inclusive). `None` indicates no limit. max : `~astropy.time.Time`", "---------- vals : array-like the values that need to be", "0 below 35 and 1 above 60. >>> from astroplan.constraints", "implementation the airmass is approximated by the secant of the", "class PhaseConstraint(Constraint): \"\"\" Constrain observations to times in some range", "time sequence If ``time_range`` is not specified, defaults to current", "max_best_rescale(vals, min_val, max_val, greater_than_max=1): \"\"\" rescales an input array ``vals``", "targets on which to apply the constraints. Returns ------- constraint_result", "zero and one), where the ``max_val`` goes to one, and", "= 90*u.deg else: self.max = max self.boolean_constraint = boolean_constraint def", "min : `~astropy.units.Quantity` or `None` (optional) Minimum acceptable separation between", "self.max)) else: raise ValueError(\"No max and/or min specified in \"", "determine whether constraints are met between test times in ``time_range``", "cls(max_solar_altitude=-18*u.deg, **kwargs) def _get_solar_altitudes(self, times, observer, targets): if not hasattr(observer,", "time in ``times``, given constraints in ``constraints`` for a particular", "astropy.units as u from astropy.coordinates import get_body, get_sun, get_moon, Galactic,", "\"\"\" Make a unique key to reference this combination of", "------- time_dict : dict Dictionary containing a key-value pair. 'times'", "**kwargs) @classmethod def grey(cls, min=0.25, max=0.65, **kwargs): \"\"\" initialize a", "not the lower limit. Parameters ---------- max : float or", "1D or 2D array of float or bool The constraints.", "not None: mask = ((self.min <= moon_separation) & (moon_separation <=", "try: if hasattr(targets, 'frame'): # treat as a SkyCoord object.", "\"\"\" def __init__(self, min=None, max=None): \"\"\" Parameters ---------- min :", "those times and (3) contains the moon illumination for those", "to 2, etc. 
\"\"\" # TODO: This method could be", "return cls(max_solar_altitude=-6*u.deg, **kwargs) @classmethod def twilight_nautical(cls, **kwargs): \"\"\" Consider nighttime", "Target or list of targets time_range : `~astropy.time.Time` (optional) Lower", "try: timekey = tuple(times.jd) + times.shape except BaseException: # must", "and 1, with the best (1) and worst (2.25). All", "not None: mask = (self.max >= illumination) | moon_down_mask elif", "and/or min specified in \" \"MoonSeparationConstraint.\") return mask class LocalTimeConstraint(Constraint):", "frame of the moon coord # which is GCRS, and", ">>> max_best_rescale(altitudes, 35, 60) # doctest: +FLOAT_CMP array([ 0. ,", "lower limit. Parameters ---------- max : float or `None` Maximum", "`None` Maximum altitude of the target (inclusive). `None` indicates no", "Observational constraint(s) observer : `~astroplan.Observer` The observer who has constraints", "indicates no limit. \"\"\" return cls(min, max, **kwargs) def compute_constraint(self,", "same day: if min_time < max_time: try: mask = np.array([min_time", "(scalar) time, the table will be for a 24 hour", "moon_alt = cached_moon['altaz'].alt moon_down_mask = moon_alt < 0 moon_up_mask =", "objects.\") if self.max is not None: if not isinstance(self.max, datetime.time):", "[constraints] times = time_grid_from_range(time_range, time_grid_resolution) # TODO: This method could", "times = time_grid_from_range(time_range, time_grid_resolution) # TODO: This method could be", "observable'``, and ``'fraction of time observable'``. The column ``'time observable'``", "is None else self.min # values below 1 should be", "of the PhaseConstraint must be within' ' the interval [0,", "min : `~astropy.time.Time` Earliest time (inclusive). `None` indicates no limit.", "\"\"\" Calculate moon altitude az and illumination for an array", "Minimum acceptable separation between moon and target (inclusive). `None` indicates", "secz elif self.min is not None and self.max is not", "acceptable fractional illumination (inclusive). `None` indicates no limit. \"\"\" return", "None: timezone = self.min.tzinfo elif self.max is not None: timezone", "at a higher altitude than airmass=2:: AirmassConstraint(2) \"\"\" def __init__(self,", "raise ValueError(\"You must at least supply either a minimum or", "= 1 if self.min is None else self.min # values", "the constraint. targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target or list", "to between 0 and 1, with the best (1) and", "times, observer, targets): # removed the location argument here, which", "no limit. \"\"\" return cls(min, max, **kwargs) @classmethod def grey(cls,", "1.0 def compute_constraint(self, times, observer=None, targets=None): phase = self.periodic_event.phase(times) mask", "``time_resolution``. Default is 0.5 hours. Returns ------- observability_table : `~astropy.table.Table`", "Default is 0.5 hours. Returns ------- observability_table : `~astropy.table.Table` A", "not None: mask = self.max >= moon_separation elif self.max is", "key to reference this combination of ``times`` and ``targets``. Often,", "self.min = min if max is None: self.max = 90*u.deg", "None and self.max is not None: mask = ((self.min <=", "that are observable between 23:50 and 04:08 local time: >>>", "\"\"\" Calculate alt/az for ``target`` at times linearly spaced between", "mask = (self.min <= secz) & (secz <= self.max) else:", "= 0) # doctest: +FLOAT_CMP array([ 1. 
, 0.6, 0.2,", "\" \"astropy.time.Time objects.\") def compute_constraint(self, times, observer, targets): with warnings.catch_warnings():", "the observability information for each of the ``targets``. The table", "The observer who has constraints ``constraints`` target : {list, `~astropy.coordinates.SkyCoord`,", "treated as a boolean (True for within the limits and", "astroplan.constraints import LocalTimeConstraint >>> import datetime as dt >>> subaru", "needed for backward compatibility [str(_current_year) + '-01-01', str(_current_year) + '-12-31']", "of ingress and egress times for ``N`` events, with shape", "= np.array([1, 1.5, 2, 3, 0]) >>> min_best_rescale(airmasses, 1, 2.25,", "at times linearly spaced between the two times in ``time_range``", "returns the Sun's coordinates in an observer # centred frame,", "moon and target (inclusive). `None` indicates no limit. ephemeris :", "in constraints] constraint_arr = np.logical_and.reduce(applied_constraints) months_observable = [] for target,", "in constraints] constraint_arr = np.logical_and.reduce(applied_constraints) colnames = ['target name', 'ever", "return mask class SecondaryEclipseConstraint(Constraint): \"\"\" Constrain observations to times during", "is ever observable at each time, given the constraints. \"\"\"", "fractional illumination (inclusive). `None` indicates no limit. \"\"\" return cls(min,", "the pressure to zero for solar altitude calculations. This avoids", "times, observer=None, targets=None): mask = self.eclipsing_system.in_primary_eclipse(times) return mask class SecondaryEclipseConstraint(Constraint):", "of times for ``targets`` and ``observer``. Cache the result on", "min self.max = max self.ephemeris = ephemeris @classmethod def dark(cls,", "'isscalar') and time_range.isscalar: time_range = (time_range-12*u.hour, time_range+12*u.hour) is_24hr_table = True", "astroplan import Observer >>> from astropy.time import Time >>> subaru", "targets): cached_altaz = _get_altaz(times, observer, targets) alt = cached_altaz['altaz'].alt if", "constraint_arr = np.logical_and.reduce(applied_constraints) return np.any(constraint_arr, axis=1) def is_event_observable(constraints, observer, target,", "04:08 local time: >>> from astroplan import Observer >>> from", "solar_separation = sun.separation(targets) if self.min is None and self.max is", "max=dt.time(4,8)) \"\"\" self.min = min self.max = max if self.min", "in times.datetime]) except BaseException: mask = np.bool_((times.datetime.time() >= min_time) or", "<= secz elif self.min is not None and self.max is", "of 0.65 Parameters ---------- min : float or `None` (optional)", "output_shape = np.broadcast(a, b).shape if output_shape != np.array(result).shape: result =", "removed the location argument here, which causes small <1 deg", "* len(shp1)) b = as_strided(x, shape=shp2, strides=[0] * len(shp2)) output_shape", "no limit. max : `~astropy.units.Quantity` or `None` Maximum altitude of", "`~astropy.units.Quantity` (optional) If ``time_range`` is specified, determine whether constraints are", "``times`` for whether or not the target is ever observable", "'ever observable', 'always observable', 'fraction of time observable'] target_names =", "min_val, max_val, less_than_min=1): \"\"\" rescales an input array ``vals`` to", "or from observer if self.min is not None: timezone =", "is 0.5 hours. 
Returns ------- observability_table : `~astropy.table.Table` A Table", "each subclass of Constraint raise NotImplementedError class AltitudeConstraint(Constraint): \"\"\" Constrain", "target, times=times, grid_times_targets=True) for constraint in constraints] constraint_arr = np.logical_and.reduce(applied_constraints)", "sure the output has the same shape as would result", "import datetime import time import warnings # Third-party from astropy.time", "of the zenith angle. .. note:: The ``max`` and ``min``", "bool Forcefully use 0 pressure. Returns ------- altaz_dict : dict", "self.max_solar_altitude = max_solar_altitude self.force_pressure_zero = force_pressure_zero @classmethod def twilight_civil(cls, **kwargs):", "---------- times : `~astropy.time.Time` The times to compute the constraint", "= max_solar_altitude self.force_pressure_zero = force_pressure_zero @classmethod def twilight_civil(cls, **kwargs): \"\"\"", "'altaz' contains the corresponding alt/az coordinates at those times and", "self.max >= separation elif self.max is None and self.min is", "block. This can be useful if not all observing blocks", "observer, targets): with warnings.catch_warnings(): warnings.simplefilter('ignore') min_time = Time(\"1950-01-01T00:00:00\") if self.min", "]) \"\"\" rescaled = (vals - min_val) / (max_val -", "0 altaz = observer.altaz(times, targets, grid_times_targets=False) observer._altaz_cache[aakey] = dict(times=times, altaz=altaz)", "Constrain the distance between the Galactic plane and some targets.", ".. note:: The ``max`` and ``min`` arguments appear in the", "\"astropy.time.Time objects.\") if self.max is not None: if not isinstance(self.max,", "and ``'constraints'`` (containing the supplied ``constraints``). \"\"\" if not hasattr(constraints,", "= np.array([1]) a = as_strided(x, shape=shp1, strides=[0] * len(shp1)) b", "from astropy.time import Time import astropy.units as u from astropy.coordinates", "bounds on time sequence, with spacing ``time_resolution``. This will be", "max_time = self.max else: max_time = datetime.time(23, 59, 59) #", "not None: mask = self.max >= separation elif self.max is", "than ``max_val`` should also return one, in some cases it", "needed until astropy PR #5897 is released # which should", "self.min <= separation elif self.min is not None and self.max", "Constrain the distance between the Sun and some targets. \"\"\"", ": array-like the values that need to be rescaled to", "current year (localtime) time_grid_resolution : `~astropy.units.Quantity` (optional) If ``time_range`` is", "\"\"\" Abstract class for objects defining observational constraints. \"\"\" __metaclass__", "sun = get_body('sun', times, location=observer.location) solar_separation = sun.separation(targets) if self.min", "``constraints`` target : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target times : `~astropy.time.Time`", "(max, min) in this initializer to support the common case", "queries don't cause problems mask = np.bool_(min_time <= times.datetime.time() <=", "the time limits used in calls to `is_observable` or `is_always_observable`.", "time, the table will be for a 24 hour period", "raise TypeError(\"Time limits must be specified as datetime.time objects.\") if", "negative Parameters ---------- min : `~astropy.units.Quantity` or `None` Minimum altitude", "for within the limits and False for outside). If False,", "and some targets. 
\"\"\" def __init__(self, min=None, max=None, ephemeris=None): \"\"\"", "observer, targets, times=None, time_range=None, time_grid_resolution=0.5*u.hour): \"\"\" Determines if the ``targets``", "time objects, or from observer if self.min is not None:", "constraints] constraint_arr = np.logical_and.reduce(applied_constraints) months_observable = [] for target, observable", "if the ``target`` is observable at each time in ``times``,", "numpy.lib.stride_tricks import as_strided # Package from .moon import moon_illumination from", "0 and 1 min_val : float worst acceptable value (rescales", "observations to targets that are observable between 2016-03-28 and 2016-03-30:", "= np.logical_and(np.logical_and.reduce(applied_constraints_ing), np.logical_and.reduce(applied_constraints_egr)) return constraint_arr def months_observable(constraints, observer, targets, time_range=_current_year_time_range,", "table contains four columns with information about the target and", "of the same shape and # broadcast these to find", "min_best_rescale(airmasses, 1, 2.25, less_than_min = 0) # doctest: +FLOAT_CMP array([", "= [] for target, observable in zip(targets, constraint_arr): s =", "import datetime as dt >>> subaru = Observer.at_site(\"Subaru\", timezone=\"US/Hawaii\") >>>", "= min self.max = max self.ephemeris = ephemeris def compute_constraint(self,", "<= separation elif self.min is not None and self.max is", "self.max else: max_time = datetime.time(23, 59, 59) # If time", "be described on the interval [0, 1). To constrain observations", "or `None` (optional) Maximum acceptable separation between Sun and target", "return cls(max_solar_altitude=-12*u.deg, **kwargs) @classmethod def twilight_astronomical(cls, **kwargs): \"\"\" Consider nighttime", "illum=illumination, altaz=altaz) finally: if force_zero_pressure: observer.pressure = observer_old_pressure return observer._moon_cache[aakey]", "# to simple numpy arrays of the same shape and", "Sun that can occur when the Sun is below the", "self.min is not None: min_time = self.min else: min_time =", "> min_time, times < max_time) return mask class PrimaryEclipseConstraint(Constraint): \"\"\"", "the target (inclusive). `None` indicates no limit. max : `~astropy.units.Quantity`", "convert times to tuple for hashing aakey = _make_cache_key(times, targets)", "# by the observer. # 'get_sun' returns ICRS coords. sun", "\" \"AirmassConstraint.\") return mask else: if self.max is None: raise", "for t in times.datetime]) except BaseException: mask = np.bool_((times.datetime.time() >=", "some range of phases for a periodic event (e.g.~transiting exoplanets,", "upper bounds on time sequence If ``time_range`` is not specified,", "and 1, with the best (60) going to 1 and", "alt uppermask = alt <= self.max return lowermask & uppermask", "= ((self.min <= solar_separation) & (solar_separation <= self.max)) else: raise", "indicates no limit. max : `~astropy.units.Quantity` or `None` (optional) Minimum", "expensive calculations for a combination of ``targets`` and ``times`` in", "aakey not in observer._moon_cache: try: if force_zero_pressure: observer_old_pressure = observer.pressure", "solar_separation) & (solar_separation <= self.max)) else: raise ValueError(\"No max and/or", "or exoplanet system. min : float (optional) Minimum phase (inclusive)", "set to 'builtin' by default). 
\"\"\" self.min = min self.max", ">>> t1 = Time(\"2016-03-28T12:00:00\") >>> t2 = Time(\"2016-03-30T12:00:00\") >>> constraint", "target in targets] ever_obs = np.any(constraint_arr, axis=1) always_obs = np.all(constraint_arr,", "``min_val`` should also return one, in some cases it should", "Earliest local time (inclusive). `None` indicates no limit. max :", "to 'builtin' by default). \"\"\" self.min = min self.max =", "first is the moon up? cached_moon = _get_moon_data(times, observer) moon_alt", "cause problems mask = np.bool_(min_time <= times.datetime.time() <= max_time) #", "= TimeConstraint(t1,t2) \"\"\" self.min = min self.max = max if", "1] applied_constraints_ing = [constraint(observer, target, times=times_ing, grid_times_targets=True) for constraint in", "hour period centered on that time. time_grid_resolution : `~astropy.units.Quantity` (optional)", "targets are observable for an observer. \"\"\" from __future__ import", "Galactic plane and some targets. \"\"\" def __init__(self, min=None, max=None):", "between the Earth's moon and some targets. \"\"\" def __init__(self,", "(vals - min_val) / (max_val - min_val) below = vals", "(inclusive) on interval [0, 1). Default is one. Examples --------", "where the ``max_val`` goes to one, and the ``min_val`` goes", "``constraints``. force_zero_pressure : bool Forcefully use 0 pressure. Returns -------", "within' ' the interval [0, 1).') self.min = min if", "accessing the ra attribute. targkey = tuple(targets.frame.data.lon.value.ravel()) + targets.shape else:", "altaz=altaz) finally: if force_zero_pressure: observer.pressure = observer_old_pressure return observer._moon_cache[aakey] def", "returned for ``vals`` above ``max_val``. (in some cases anything higher", "<= t.time() <= max_time for t in times.datetime]) except BaseException:", "`None` indicates no limit. max : `~astropy.time.Time` Latest time (inclusive).", "in calls to `is_observable` or `is_always_observable`. \"\"\" def __init__(self, min=None,", "ValueError(\"You must at least supply either a minimum or a", "max and/or min specified in \" \"GalacticLatitudeConstraint.\") return mask class", "phase (inclusive) on interval [0, 1). Default is one. Examples", "to find the correct shape shp1, shp2 = times.shape, targets.shape", "times=None, time_range=None, time_grid_resolution=0.5*u.hour): \"\"\" A function to determine whether ``targets``", "constraints ``constraints`` target : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target times :", "- see LICENSE.rst \"\"\" Specify and constraints to determine which", "times to tuple for hashing aakey = _make_cache_key(times, targets) if", "0 altaz = observer.moon_altaz(times) illumination = np.array(moon_illumination(times)) observer._moon_cache[aakey] = dict(times=times,", "= np.logical_and(times > min_time, times < max_time) return mask class", "# If time boundaries straddle midnight: else: try: mask =", "a key-value pair. 'times' contains the meridian_transit times. \"\"\" if", "times along the second. \"\"\" # Should be implemented on", "in observer._altaz_cache: try: if force_zero_pressure: observer_old_pressure = observer.pressure observer.pressure =", "illumination of the Earth's moon. 
Constraint is also satisfied if", "compute_constraint(self, times, observer=None, targets=None): phase = self.periodic_event.phase(times) mask = np.where(self.max", "problems mask = np.bool_(min_time <= times.datetime.time() <= max_time) # If", "is not None: mask = ((self.min <= illumination) & (illumination", "in ``time_range`` by checking constraint at linearly-spaced times separated by", "to ``min_val`` equal 0 and those equal to ``max_val`` equal", "elif self.min is not None and self.max is not None:", "for ``targets`` and ``observer``. Cache the result on the ``observer``", "times < max_time) return mask class PrimaryEclipseConstraint(Constraint): \"\"\" Constrain observations", "= ephemeris def compute_constraint(self, times, observer, targets): # removed the", "aakey = _make_cache_key(times, targets) if aakey not in observer._meridian_transit_cache: meridian_transit_times", "solar altitude at these times altaz = observer.altaz(times, get_sun(times)) altitude", "the order matters here # moon.separation(targets) is NOT the same", "min_val above = vals > max_val rescaled[below] = 0 rescaled[above]", "# use get_body rather than get sun here, since #", "\"\"\" Parameters ---------- min : `~astropy.time.Time` Earliest time (inclusive). `None`", "booleans of same length as ``targets`` for whether or not", "__metaclass__ = ABCMeta def __call__(self, observer, targets, times=None, time_range=None, time_grid_resolution=0.5*u.hour,", "times=None, times_ingress_egress=None): \"\"\" Determines if the ``target`` is observable at", "t2 = Time(\"2016-03-30T12:00:00\") >>> constraint = TimeConstraint(t1,t2) \"\"\" self.min =", "equal to ``max_val`` equal 0 and those equal to ``min_val``", "# TODO: these broadcasting operations are relatively slow # but", "`None` indicates no limit. boolean_constraint : bool If True, the", "observable'] = tab['fraction of time observable'] * 24*u.hour tab.meta['times'] =", "limit. boolean_contstraint : bool Examples -------- To create a constraint", "not None: min_time = self.min else: min_time = self.min =", "max_best_rescale(altitudes, 35, 60) # doctest: +FLOAT_CMP array([ 0. , 0.", "1. ]) \"\"\" rescaled = (vals - min_val) / (max_val", "it is considered to be \"night\" (inclusive). force_pressure_zero : bool", "the constraint result with targets along the first index and", "2.25, less_than_min = 0) # doctest: +FLOAT_CMP array([ 1. ,", "Time(\"1950-01-01T00:00:00\") if self.min is None else self.min max_time = Time(\"2120-01-01T00:00:00\")", "constraints in ``constraints_list`` for ``observer``. Parameters ---------- constraints : list", "is given as a scalar. It also contains metadata entries", "sped up a lot by dropping to the trigonometric #", "mask = ((self.min <= moon_separation) & (moon_separation <= self.max)) else:", "self.min is None and self.max is not None: mask =", "no minimum and a maximum of 0.25 Parameters ---------- min", "= observer._preprocess_inputs(times, targets, grid_times_targets=False) result = self.compute_constraint(times, observer, targets) #", "the range, the rescale should return 0 below 35 and", "Consider nighttime as time between nautical twilights (-12 degrees). \"\"\"", "separation) & (separation <= self.max)) else: raise ValueError(\"No max and/or", "innacuracies, but it is needed until astropy PR #5897 is", "``constraints_list`` for a particular ``observer``. 
Parameters ---------- constraints : list", "hasattr(constraints, '__len__'): constraints = [constraints] if times is not None:", "\"\"\" return cls(max_solar_altitude=-18*u.deg, **kwargs) def _get_solar_altitudes(self, times, observer, targets): if", "slow due to # complex nature of these objects. We", "targets coords try: if hasattr(targets, 'frame'): # treat as a", "zero. max : float (optional) Maximum phase (inclusive) on interval", "constraints. \"\"\" if not hasattr(constraints, '__len__'): constraints = [constraints] if", "= time.localtime().tm_year # needed for backward compatibility _current_year_time_range = Time(", "= _make_cache_key(times, targets) if aakey not in observer._meridian_transit_cache: meridian_transit_times =", "__init__(self, periodic_event, min=None, max=None): \"\"\" Parameters ---------- periodic_event : `~astroplan.periodic.PeriodicEvent`", "observer._altaz_cache: try: if self.force_pressure_zero: observer_old_pressure = observer.pressure observer.pressure = 0", "self.periodic_event = periodic_event if (min < 0) or (min >", "grid_times_targets=True) for constraint in constraints] constraint_arr = np.logical_and.reduce(applied_constraints) else: times_ing", "data should be unique and is # quicker than accessing", "cases anything less than ``min_val`` should also return one, in", "\"\"\" return cls(max_solar_altitude=-12*u.deg, **kwargs) @classmethod def twilight_astronomical(cls, **kwargs): \"\"\" Consider", "be within' ' the interval [0, 1).') self.min = min", "if self.boolean_constraint: lowermask = self.min <= alt uppermask = alt", "constraints ``constraints``. force_zero_pressure : bool Forcefully use 0 pressure. Returns", "moon_alt < 0 moon_up_mask = moon_alt >= 0 illumination =", "which to compute the phase. For example, the system could", "Specify and constraints to determine which targets are observable for", "on time sequence If ``time_range`` is not specified, defaults to", "hasattr(observer, '_altaz_cache'): observer._altaz_cache = {} aakey = _make_cache_key(times, 'sun') if", "mi, mx, less_than_min=0) class AtNightConstraint(Constraint): \"\"\" Constrain the Sun to", "0, 0) if self.max is not None: max_time = self.max", "GCRS, and that is what we want. moon_separation = moon.separation(targets)", "times on which to test the constraint. observer : `~astroplan.Observer`", "it is needed until astropy PR #5897 is released #", "time (inclusive). `None` indicates no limit. max : `~astropy.time.Time` Latest", "sun here, since # it returns the Sun's coordinates in", "observable hours. \"\"\" def __init__(self, min=None, max=None): \"\"\" Parameters ----------", "about (rescales to 1) greater_than_max : 0 or 1 what", ", 0.6, 0.2, 0. , 0. ]) \"\"\" rescaled =", ">>> binary = PeriodicEvent(epoch=Time('2017-01-01 02:00'), period=1*u.day) >>> constraint = PhaseConstraint(binary,", "who has constraints ``constraints`` target : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target", "cache on an ``observer``` object. This routine will provide an", "is None and time_range is not None: times = time_grid_from_range(time_range,", "on the airmass (``max``) and not the lower limit. Parameters", "time range given the constraints. \"\"\" if not hasattr(constraints, '__len__'):", "return one, in some cases it should return zero) Returns", "array of float or bool The constraints. If 2D with", "Sun and target (inclusive). `None` indicates no limit. 
max :", "be between 0 and 1 min_val : float worst acceptable", "import warnings # Third-party from astropy.time import Time import astropy.units", "float or `None` Minimum airmass of the target. `None` indicates", "times, observer, targets): cached_altaz = _get_altaz(times, observer, targets) alt =", "import Observer >>> from astroplan.constraints import LocalTimeConstraint >>> import datetime", "Array of times on which to test the constraint observer", "``'time observable'`` will also be present if the ``time_range`` is", "between test times in ``time_range`` by checking constraint at linearly-spaced", "observer._meridian_transit_cache: meridian_transit_times = observer.target_meridian_transit_time(times, targets) observer._meridian_transit_cache[aakey] = dict(times=meridian_transit_times) return observer._meridian_transit_cache[aakey]", "def __init__(self, max=None, min=1, boolean_constraint=True): self.min = min self.max =", "the Sun that can occur when the Sun is below", "do the real work of computing the constraint. Subclasses override", "in an observer # centred frame, so the separation is", "applied_constraints = [constraint(observer, target, times=times, grid_times_targets=True) for constraint in constraints]", "of 0.65 and no maximum Parameters ---------- min : float", "list of targets time_range : `~astropy.time.Time` (optional) Lower and upper", "# which is GCRS, and that is what we want.", "current implementation the airmass is approximated by the secant of", "Returns ------- altaz_dict : dict Dictionary containing two key-value pairs.", "the altitude observer._altaz_cache[aakey] = dict(times=times, altitude=altitude) finally: if self.force_pressure_zero: observer.pressure", "minimum and maximum phase must be described on the interval", "for constraint in constraints] constraint_arr = np.logical_and.reduce(applied_constraints) return np.all(constraint_arr, axis=1)", "= vals > max_val rescaled[below] = 0 rescaled[above] = greater_than_max", "``targets`` over the requested ``time_range``, given the constraints in ``constraints_list``", "= eclipsing_system def compute_constraint(self, times, observer=None, targets=None): mask = self.eclipsing_system.in_secondary_eclipse(times)", "times_ingress_egress : `~astropy.time.Time` (optional) Array of ingress and egress times", "mx, less_than_min=0) class AtNightConstraint(Constraint): \"\"\" Constrain the Sun to be", "mi = 1 if self.min is None else self.min #", "`~astroplan.periodic.EclipsingSystem` System which must be in primary eclipse. \"\"\" self.eclipsing_system", "on which to test the constraint. observer : `~astroplan.Observer` The", "the trigonometric # altitude calculations. if not hasattr(constraints, '__len__'): constraints", "if not hasattr(observer, '_meridian_transit_cache'): observer._meridian_transit_cache = {} # convert times", "<1 deg # innacuracies, but it is needed until astropy", "passed as the first argument into `~astroplan.time_grid_from_range`. time_grid_resolution : `~astropy.units.Quantity`", "period centered on that time. time_grid_resolution : `~astropy.units.Quantity` (optional) If", "self.min is not None: mask = (self.min <= illumination) &", "tuple for hashing aakey = _make_cache_key(times, targets) if aakey not", "in some cases it should return zero) Returns ------- array", "def _make_cache_key(times, targets): \"\"\" Make a unique key to reference", "Should be implemented on each subclass of Constraint raise NotImplementedError", "limit. 
max : float or `None` (optional) Maximum acceptable fractional", "self.min = min self.max = max self.ephemeris = ephemeris @classmethod", "else: targets = targets[..., np.newaxis] times, targets = observer._preprocess_inputs(times, targets,", "times in some range of phases for a periodic event", "we wish to store expensive calculations for a combination of", "target. `None` indicates no limit. boolean_contstraint : bool Examples --------", "the constraint. WHAT HAPPENS WHEN BOTH TIMES AND TIME_RANGE ARE", "@abstractmethod def compute_constraint(self, times, observer, targets): \"\"\" Actually do the", "targets is not None: # broadcasting times v targets is", "if not isinstance(self.max, Time): raise TypeError(\"Time limits must be specified", "phase. For example, the system could be an eclipsing or", "get_skycoord(targets) # TODO: these broadcasting operations are relatively slow #", ": str, optional Ephemeris to use. If not given, use", "minimum or a \" \"maximum time.\") if self.min is not", "`is_observable` or `is_always_observable`. \"\"\" def __init__(self, min=None, max=None): \"\"\" Parameters", "and upper bounds on time sequence. time_grid_resolution : `~astropy.units.quantity` Time-grid", "time_grid_from_range(time_range, time_resolution=time_grid_resolution) if is_24hr_table: tab['time observable'] = tab['fraction of time", "nature of these objects. We make # to simple numpy", "-------- To create a constraint that requires the airmass be", "if not isinstance(self.min, datetime.time): raise TypeError(\"Time limits must be specified", ">>> from astroplan.constraints import max_best_rescale >>> import numpy as np", "convert times to tuple for hashing aakey = _make_cache_key(times, 'moon')", "True, the constraint is treated as a boolean (True for", "released # which should be astropy 1.3.2 moon = get_moon(times,", "def __init__(self, min=None, max=None, boolean_constraint=True): if min is None: self.min", "degrees). \"\"\" return cls(max_solar_altitude=-6*u.deg, **kwargs) @classmethod def twilight_nautical(cls, **kwargs): \"\"\"", "System which must be in primary eclipse. \"\"\" self.eclipsing_system =", "str, optional Ephemeris to use. If not given, use the", "and target (inclusive). `None` indicates no limit. ephemeris : str,", "each of the ``targets``. The table contains four columns with", "for a combination of ``targets`` and ``times`` in a cache", "style license - see LICENSE.rst \"\"\" Specify and constraints to", "for this class Parameters ---------- observer : `~astroplan.Observer` the observation", "of floats between 0 and 1 inclusive rescaled so that", "list of targets. observer : `~astroplan.Observer` The observer who has", "observer : `~astroplan.Observer` The observer who has constraints ``constraints`` targets", "which should be astropy 1.3.2 moon = get_moon(times, ephemeris=self.ephemeris) #", "self.min is not None: timezone = self.min.tzinfo elif self.max is", "< max_time) return mask class PrimaryEclipseConstraint(Constraint): \"\"\" Constrain observations to", "times. \"\"\" if not hasattr(observer, '_meridian_transit_cache'): observer._meridian_transit_cache = {} #", "`None` Minimum altitude of the target (inclusive). 
`None` indicates no", "should be disregarded return min_best_rescale(secz, mi, mx, less_than_min=0) class AtNightConstraint(Constraint):", "``time_range`` is not specified, defaults to current year (localtime) time_grid_resolution", "observability_table(constraints, observer, targets, times=None, time_range=None, time_grid_resolution=0.5*u.hour): \"\"\" Creates a table", "and 1 inclusive rescaled so that ``vals`` equal to ``min_val``", "a dictionary. Parameters ---------- times : `~astropy.time.Time` Array of times", "is approximated by the secant of the zenith angle. ..", "and 1 above 60. >>> from astroplan.constraints import max_best_rescale >>>", "on which to test the constraint. targets : {list, `~astropy.coordinates.SkyCoord`,", "the specified ``targets`` are observable for a specific ``observer``, given", "np.array([20, 30, 40, 45, 55, 70]) >>> max_best_rescale(altitudes, 35, 60)", "to be between 0 and 1, with the best (60)", "0.65 Parameters ---------- min : float or `None` (optional) Minimum", "from the second number: >>> constraint = PhaseConstraint(binary, min=0.6, max=0.2)", "the constraint time_range : `~astropy.time.Time` (optional) Lower and upper bounds", "observer_old_pressure else: altitude = observer._altaz_cache[aakey]['altitude'] return altitude def compute_constraint(self, times,", "will be passed as the first argument into `~astroplan.time_grid_from_range`. If", "_make_cache_key(times, 'sun') if aakey not in observer._altaz_cache: try: if self.force_pressure_zero:", "---------- constraints : list or `~astroplan.constraints.Constraint` Observational constraint(s) observer :", "at each time, given the constraints. \"\"\" if not hasattr(constraints,", "per target. These integers are 1-based so that January maps", "0 and 1 inclusive rescaled so that ``vals`` equal to", "observability information for each of the ``targets``. The table contains", "Constrain observations to times during secondary eclipse. \"\"\" def __init__(self,", "two key-value pairs. (1) 'times' contains the times for the", "np.array(result).shape: result = np.broadcast_to(result, output_shape) return result @abstractmethod def compute_constraint(self,", "between 0 and 1 min_val : float worst acceptable value", "and is # quicker than accessing the ra attribute. targkey", "float on [0, 1], where 0 is the min altitude", "or `None` (optional) Minimum acceptable Galactic latitude of target (inclusive).", "routine will provide an appropriate, hashable, key to store these", "constraints] constraint_arr = np.logical_and.reduce(applied_constraints) colnames = ['target name', 'ever observable',", "(times.jd,) # make hashable thing from targets coords try: if", "with shape (``N``, 2). Returns ------- event_observable : `~numpy.ndarray` Array", "Returns ------- constraint_result : 1D or 2D array of float", "\"\"\" Determines if the ``targets`` are observable during ``time_range`` given", "= vals < min_val above = vals > max_val rescaled[below]", "np.logical_and(times > min_time, times < max_time) return mask class PrimaryEclipseConstraint(Constraint):", "np.array([min_time <= t.time() <= max_time for t in times.datetime]) except", "indicates no limit. \"\"\" return cls(min, max, **kwargs) @classmethod def", "less_than_min=1): \"\"\" rescales an input array ``vals`` to be a", "constraints, with targets along the first index and times along", "as u >>> binary = PeriodicEvent(epoch=Time('2017-01-01 02:00'), period=1*u.day) >>> constraint", "the range should return 0. 
>>> from astroplan.constraints import min_best_rescale", "index and times along the second. Otherwise, we rely on", "boolean_contstraint : bool Examples -------- To create a constraint that", "use 0 pressure. Returns ------- moon_dict : dict Dictionary containing", "not None: mask = self.min <= separation elif self.min is", "airmass of a target. In the current implementation the airmass", "can occur when the Sun is below the horizon and", "force_zero_pressure : bool Forcefully use 0 pressure. Returns ------- moon_dict", "max self.ephemeris = ephemeris def compute_constraint(self, times, observer, targets): #", "self.periodic_event.phase(times) mask = np.where(self.max > self.min, (phase >= self.min) &", "solar_altitude <= self.max_solar_altitude return mask class GalacticLatitudeConstraint(Constraint): \"\"\" Constrain the", "elif self.max is None and self.min is not None: mask", "(optional) Maximum acceptable fractional illumination (inclusive). `None` indicates no limit.", "of booleans of same length as ``times`` for whether or", "to be a score (between zero and one), where the", "and/or min specified in \" \"SunSeparationConstraint.\") return mask class MoonSeparationConstraint(Constraint):", "example use case for this class would be to associate", "= Observer.at_site(\"Subaru\", timezone=\"US/Hawaii\") >>> # bound times between 23:50 and", "print_function, unicode_literals) # Standard library from abc import ABCMeta, abstractmethod", "meridian_transit_times = observer.target_meridian_transit_time(times, targets) observer._meridian_transit_cache[aakey] = dict(times=meridian_transit_times) return observer._meridian_transit_cache[aakey] @abstractmethod", "between Sun and target (inclusive). `None` indicates no limit. \"\"\"", "(optional) Array of times on which to test the constraint", "\"\"\" Constrain observations to times in some range of phases", "= boolean_constraint def compute_constraint(self, times, observer, targets): cached_altaz = _get_altaz(times,", "shp2 = times.shape, targets.shape x = np.array([1]) a = as_strided(x,", "Galactic latitude of target (inclusive). `None` indicates no limit. max", "is not None: mask = self.max >= solar_separation elif self.max", "whether or not each target is observable in the time", "observing block. This can be useful if not all observing", "times, observer, targets): separation = abs(targets.transform_to(Galactic).b) if self.min is None", "rescaled = (vals - max_val) / (min_val - max_val) below", "get_body, get_sun, get_moon, Galactic, SkyCoord from astropy import table import", "0.2, 0.4, 0.8, 1. ]) \"\"\" rescaled = (vals -", "target is observable, one set per target. These integers are", "or not each target is ever observable in the time", "'__len__'): constraints = [constraints] is_24hr_table = False if hasattr(time_range, 'isscalar')", "Target or list of targets. observer : `~astroplan.Observer` The observer", "`~astropy.units.Quantity` or `None` (optional) Minimum acceptable separation between Sun and", "altaz.alt # cache the altitude observer._altaz_cache[aakey] = dict(times=times, altitude=altitude) finally:", "this class would be to associate an acceptable time range", "target (inclusive). `None` indicates no limit. max : `~astropy.units.Quantity` or", "constraints. 
If 2D with targets along the first index and", "broadcast these to find the correct shape shp1, shp2 =", "list of targets times : `~astropy.time.Time` (optional) Array of times", "constraint_arr def months_observable(constraints, observer, targets, time_range=_current_year_time_range, time_grid_resolution=0.5*u.hour): \"\"\" Determines which", "def compute_constraint(self, times, observer, targets): separation = abs(targets.transform_to(Galactic).b) if self.min", "contains the meridian_transit times. \"\"\" if not hasattr(observer, '_meridian_transit_cache'): observer._meridian_transit_cache", "return mask class MoonSeparationConstraint(Constraint): \"\"\" Constrain the distance between the", "not isinstance(self.max, datetime.time): raise TypeError(\"Time limits must be specified as", "months_observable.append(s) return months_observable def observability_table(constraints, observer, targets, times=None, time_range=None, time_grid_resolution=0.5*u.hour):", "``min_val`` goes to one, and the ``max_val`` goes to zero.", "**kwargs): \"\"\" initialize a `~astroplan.constraints.MoonIlluminationConstraint` with defaults of a minimum", "[str(_current_year) + '-01-01', str(_current_year) + '-12-31'] ) def _make_cache_key(times, targets):", "Subclasses override this. Parameters ---------- times : `~astropy.time.Time` The times", "the constraints. \"\"\" if not hasattr(constraints, '__len__'): constraints = [constraints]", "\"PrimaryEclipseConstraint\", \"SecondaryEclipseConstraint\", \"Constraint\", \"TimeConstraint\", \"observability_table\", \"months_observable\", \"max_best_rescale\", \"min_best_rescale\", \"PhaseConstraint\", \"is_event_observable\"]", "`~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target or list of targets time_range : `~astropy.time.Time`", "to the trigonometric # altitude calculations. applied_constraints = [constraint(observer, targets,", "\"\"\" def __init__(self, periodic_event, min=None, max=None): \"\"\" Parameters ---------- periodic_event", "------- moon_dict : dict Dictionary containing three key-value pairs. (1)", "observer, targets, times=None, time_range=None, time_grid_resolution=0.5*u.hour): \"\"\" A function to determine", "Force the pressure to zero for solar altitude calculations. This", "constraint = PhaseConstraint(binary, min=0.6, max=0.2) \"\"\" self.periodic_event = periodic_event if", "np.bool_(min_time <= times.datetime.time() <= max_time) # If time boundaries straddle", "(True for within the limits and False for outside). If", ": bool If True, the constraint is treated as a", "should also return one, in some cases it should return", "subaru = Observer.at_site(\"Subaru\") >>> t1 = Time(\"2016-03-28T12:00:00\") >>> t2 =", "metadata entries ``'times'`` (with an array of all the times),", "between 0.4 and 0.6, >>> from astroplan import PeriodicEvent >>>", "not None: mask = ((self.min <= illumination) & (illumination <=", "constraint in constraints] constraint_arr = np.logical_and.reduce(applied_constraints) return np.any(constraint_arr, axis=1) def", "None: mask = self.min <= solar_separation elif self.min is not", "string. targkey = (targets,) except BaseException: targkey = (targets.frame.data.lon,) return", "def _get_meridian_transit_times(times, observer, targets): \"\"\" Calculate next meridian transit for", "with the best (1) and worst (2.25). All values outside", "no limit. 


class Constraint(object):
    """
    Abstract class for objects defining observational constraints.
    """
    __metaclass__ = ABCMeta

    def __call__(self, observer, targets, times=None, time_range=None,
                 time_grid_resolution=0.5*u.hour, grid_times_targets=False):
        """
        Compute the constraint for this class.

        Parameters
        ----------
        observer : `~astroplan.Observer`
            The observation location from which to apply the constraints.
        targets : sequence
            The targets on which to apply the constraints.
        times : `~astropy.time.Time` (optional)
            The times on which to compute the constraint.  If ``times`` is
            given, ``time_range`` is ignored.
        time_range : `~astropy.time.Time` (optional, length = 2)
            Lower and upper bounds on time sequence.
        time_grid_resolution : `~astropy.units.Quantity`
            Time-grid spacing.
        grid_times_targets : bool
            If True, grids the constraint result with targets along the
            first index and times along the second.  Otherwise, we rely on
            broadcasting the shapes together using standard numpy rules.

        Returns
        -------
        constraint_result : 1D or 2D array of float or bool
            The constraints, with targets along the first index and times
            along the second.
        """
        if times is None and time_range is not None:
            times = time_grid_from_range(time_range,
                                         time_resolution=time_grid_resolution)

        if grid_times_targets:
            targets = get_skycoord(targets)
            # TODO: broadcasting times versus targets is slow due to the
            # complex nature of these objects.  There is potential for a
            # huge speedup if the end user re-shapes the coords themselves
            # prior to evaluating multiple constraints.
            if targets.isscalar:
                # ensure we have a (1, 1) shape coord
                targets = SkyCoord(np.tile(targets, 1))[:, np.newaxis]
            else:
                targets = targets[..., np.newaxis]

        times, targets = observer._preprocess_inputs(times, targets,
                                                     grid_times_targets=False)
        result = self.compute_constraint(times, observer, targets)

        # make sure the output has the same shape as would result from
        # broadcasting times and targets against each other
        if targets is not None:
            # broadcast two simple numpy arrays of the same shapes to find
            # the correct output shape
            shp1, shp2 = times.shape, targets.shape
            x = np.array([1])
            a = as_strided(x, shape=shp1, strides=[0] * len(shp1))
            b = as_strided(x, shape=shp2, strides=[0] * len(shp2))
            output_shape = np.broadcast(a, b).shape
            if output_shape != np.array(result).shape:
                result = np.broadcast_to(result, output_shape)

        return result

    @abstractmethod
    def compute_constraint(self, times, observer, targets):
        """
        Actually do the real work of computing the constraint.  Subclasses
        override this.

        Parameters
        ----------
        times : `~astropy.time.Time`
            The times on which to compute the constraint.
        observer : `~astroplan.Observer`
            The observation location from which to apply the constraints.
        targets : sequence
            The targets on which to apply the constraints.

        Returns
        -------
        constraint_result : 2D array of float or bool
            The constraints, with targets along the first index and times
            along the second.
        """
        # Should be implemented on each subclass of Constraint
        raise NotImplementedError
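

# A minimal sketch of a user-defined constraint, assuming a hypothetical use
# case: keep every target at least ``min_sep`` away from a fixed avoidance
# coordinate (for example a bright star).  The class name and default
# threshold are illustrative, not part of the astroplan API.
class _ExampleAvoidanceConstraint(Constraint):
    def __init__(self, avoid_coord, min_sep=10*u.deg):
        self.avoid_coord = avoid_coord
        self.min_sep = min_sep

    def compute_constraint(self, times, observer, targets):
        # The separation is time-independent for fixed coordinates, so the
        # per-target mask is broadcast across the time axis.
        separation = self.avoid_coord.separation(targets)
        mask = separation >= self.min_sep
        return mask & np.ones(times.shape, dtype=bool)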


class AltitudeConstraint(Constraint):
    """
    Constrain the altitude of the target.

    .. note::
        This can misbehave if you try to constrain negative altitudes, as
        the `~astropy.coordinates.AltAz` frame tends to mishandle negative
        altitudes.

    Parameters
    ----------
    min : `~astropy.units.Quantity` or `None`
        Minimum altitude of the target (inclusive). `None` indicates no limit.
    max : `~astropy.units.Quantity` or `None`
        Maximum altitude of the target (inclusive). `None` indicates no limit.
    boolean_constraint : bool
        If True, the constraint is treated as a boolean (True for within the
        limits and False for outside).  If False, the constraint returns a
        score in [0, 1], where 0 is the min altitude and 1 is the max.
    """

    def __init__(self, min=None, max=None, boolean_constraint=True):
        if min is None:
            self.min = -90*u.deg
        else:
            self.min = min
        if max is None:
            self.max = 90*u.deg
        else:
            self.max = max
        self.boolean_constraint = boolean_constraint

    def compute_constraint(self, times, observer, targets):
        cached_altaz = _get_altaz(times, observer, targets)
        alt = cached_altaz['altaz'].alt
        if self.boolean_constraint:
            lowermask = self.min <= alt
            uppermask = alt <= self.max
            return lowermask & uppermask
        else:
            return max_best_rescale(alt, self.min, self.max)


class AirmassConstraint(AltitudeConstraint):
    """
    Constrain the airmass of a target.

    In the current implementation the airmass is approximated by the secant
    of the zenith angle.

    .. note::
        The ``max`` and ``min`` arguments appear in the order (max, min) in
        this initializer to support the common case for users who care about
        the upper limit on the airmass (``max``) and not the lower limit.

    Parameters
    ----------
    max : float or `None`
        Maximum airmass of the target. `None` indicates no limit.
    min : float or `None`
        Minimum airmass of the target. `None` indicates no limit.
    boolean_constraint : bool

    Examples
    --------
    To create a constraint that requires the airmass be "better than 2",
    i.e. at an airmass less than 2::

        AirmassConstraint(2)
    """

    def __init__(self, max=None, min=1, boolean_constraint=True):
        self.min = min
        self.max = max
        self.boolean_constraint = boolean_constraint

    def compute_constraint(self, times, observer, targets):
        cached_altaz = _get_altaz(times, observer, targets)
        secz = cached_altaz['altaz'].secz
        if self.boolean_constraint:
            if self.min is None and self.max is not None:
                mask = secz <= self.max
            elif self.max is None and self.min is not None:
                mask = self.min <= secz
            elif self.min is not None and self.max is not None:
                mask = (self.min <= secz) & (secz <= self.max)
            else:
                raise ValueError("No max and/or min specified in "
                                 "AirmassConstraint.")
            return mask
        else:
            if self.max is None:
                raise ValueError("Cannot have a float AirmassConstraint if "
                                 "max is None.")
            else:
                mx = self.max
            mi = 1 if self.min is None else self.min
            # values below 1 should be disregarded
            return min_best_rescale(secz, mi, mx, less_than_min=0)
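

# Illustrative sketch: the same altitude window used as a hard boolean cut
# and as a soft score (``boolean_constraint=False``), which is what a
# rank-based scheduler would consume.  The limits below are assumptions
# chosen for the example, not recommended values.
def _example_altitude_airmass():
    hard_cut = [AltitudeConstraint(min=20 * u.deg, max=85 * u.deg),
                AirmassConstraint(max=2)]
    soft_score = AltitudeConstraint(min=20 * u.deg, max=85 * u.deg,
                                    boolean_constraint=False)
    return hard_cut, soft_score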


class AtNightConstraint(Constraint):
    """
    Constrain the Sun to be below ``horizon``.

    Parameters
    ----------
    max_solar_altitude : `~astropy.units.Quantity`
        The altitude of the sun below which it is considered to be "night"
        (inclusive).
    force_pressure_zero : bool (optional)
        Force the pressure to zero for solar altitude calculations.  This
        avoids errors in the derived altitude of the Sun that can occur when
        the Sun is below the horizon and the corrections for atmospheric
        refraction return nonsense values.
    """
    @u.quantity_input(max_solar_altitude=u.deg)
    def __init__(self, max_solar_altitude=0*u.deg, force_pressure_zero=True):
        self.max_solar_altitude = max_solar_altitude
        self.force_pressure_zero = force_pressure_zero

    @classmethod
    def twilight_civil(cls, **kwargs):
        """
        Consider nighttime as time between civil twilights (-6 degrees).
        """
        return cls(max_solar_altitude=-6*u.deg, **kwargs)

    @classmethod
    def twilight_nautical(cls, **kwargs):
        """
        Consider nighttime as time between nautical twilights (-12 degrees).
        """
        return cls(max_solar_altitude=-12*u.deg, **kwargs)

    @classmethod
    def twilight_astronomical(cls, **kwargs):
        """
        Consider nighttime as time between astronomical twilights (-18 degrees).
        """
        return cls(max_solar_altitude=-18*u.deg, **kwargs)

    def _get_solar_altitudes(self, times, observer, targets):
        if not hasattr(observer, '_altaz_cache'):
            observer._altaz_cache = {}

        aakey = _make_cache_key(times, 'sun')

        if aakey not in observer._altaz_cache:
            try:
                if self.force_pressure_zero:
                    observer_old_pressure = observer.pressure
                    observer.pressure = 0

                # find solar altitude at these times
                altaz = observer.altaz(times, get_sun(times))
                altitude = altaz.alt
                # cache the altitude
                observer._altaz_cache[aakey] = dict(times=times,
                                                    altitude=altitude)
            finally:
                if self.force_pressure_zero:
                    observer.pressure = observer_old_pressure
        else:
            altitude = observer._altaz_cache[aakey]['altitude']

        return altitude

    def compute_constraint(self, times, observer, targets):
        solar_altitude = self._get_solar_altitudes(times, observer, targets)
        mask = solar_altitude <= self.max_solar_altitude
        return mask
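

# Illustrative sketch: "night" defined by astronomical twilight, combined
# with a minimum altitude, evaluated over one assumed winter night.  The
# site, target coordinates and window are assumptions for the example;
# ``is_observable`` is defined later in this module.
def _example_night_constraints():
    from astroplan import Observer

    observer = Observer.at_site("Subaru")
    target = SkyCoord(ra=83.82 * u.deg, dec=-5.39 * u.deg)  # roughly M42
    constraints = [AtNightConstraint.twilight_astronomical(),
                   AltitudeConstraint(min=30 * u.deg)]
    time_range = Time(["2016-12-01 04:00:00", "2016-12-01 16:00:00"])
    return is_observable(constraints, observer, target, time_range=time_range)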


class GalacticLatitudeConstraint(Constraint):
    """
    Constrain the distance between the Galactic plane and some targets.

    Parameters
    ----------
    min, max : `~astropy.units.Quantity` or `None` (optional)
        Minimum/maximum acceptable Galactic latitude of the target
        (inclusive). `None` indicates no limit.
    """

    def __init__(self, min=None, max=None):
        self.min = min
        self.max = max

    def compute_constraint(self, times, observer, targets):
        separation = abs(targets.transform_to(Galactic).b)

        if self.min is None and self.max is not None:
            mask = self.max >= separation
        elif self.max is None and self.min is not None:
            mask = self.min <= separation
        elif self.min is not None and self.max is not None:
            mask = ((self.min <= separation) & (separation <= self.max))
        else:
            raise ValueError("No max and/or min specified in "
                             "GalacticLatitudeConstraint.")
        return mask


class SunSeparationConstraint(Constraint):
    """
    Constrain the distance between the Sun and some targets.

    Parameters
    ----------
    min, max : `~astropy.units.Quantity` or `None` (optional)
        Minimum/maximum acceptable separation between Sun and target
        (inclusive). `None` indicates no limit.
    """

    def __init__(self, min=None, max=None):
        self.min = min
        self.max = max

    def compute_constraint(self, times, observer, targets):
        # use get_body rather than get_sun here, since it returns the Sun's
        # coordinates in an observer-centred frame, so the separation is
        # as seen by the observer; 'get_sun' returns ICRS coords.
        sun = get_body('sun', times, location=observer.location)
        solar_separation = sun.separation(targets)

        if self.min is None and self.max is not None:
            mask = self.max >= solar_separation
        elif self.max is None and self.min is not None:
            mask = self.min <= solar_separation
        elif self.min is not None and self.max is not None:
            mask = ((self.min <= solar_separation) &
                    (solar_separation <= self.max))
        else:
            raise ValueError("No max and/or min specified in "
                             "SunSeparationConstraint.")
        return mask


class MoonSeparationConstraint(Constraint):
    """
    Constrain the distance between the Earth's moon and some targets.

    Parameters
    ----------
    min, max : `~astropy.units.Quantity` or `None` (optional)
        Minimum/maximum acceptable separation between moon and target
        (inclusive). `None` indicates no limit.
    ephemeris : str, optional
        Ephemeris to use.  If not given, use the one set with
        ``astropy.coordinates.solar_system_ephemeris.set`` (which is set to
        'builtin' by default).
    """

    def __init__(self, min=None, max=None, ephemeris=None):
        self.min = min
        self.max = max
        self.ephemeris = ephemeris

    def compute_constraint(self, times, observer, targets):
        # removed the location argument here, which causes small (<1 deg)
        # inaccuracies, but it is needed until the relevant astropy PR is
        # released (expected in astropy 1.3.2)
        moon = get_moon(times, ephemeris=self.ephemeris)
        # note to future editors - the order matters here:
        # moon.separation(targets) is NOT the same as targets.separation(moon)
        # the former calculates the separation in the frame of the moon
        # coord, which is GCRS, and that is what we want.
        moon_separation = moon.separation(targets)

        if self.min is None and self.max is not None:
            mask = self.max >= moon_separation
        elif self.max is None and self.min is not None:
            mask = self.min <= moon_separation
        elif self.min is not None and self.max is not None:
            mask = ((self.min <= moon_separation) &
                    (moon_separation <= self.max))
        else:
            raise ValueError("No max and/or min specified in "
                             "MoonSeparationConstraint.")
        return mask


class MoonIlluminationConstraint(Constraint):
    """
    Constrain the fractional illumination of the Earth's moon.

    Constraint is also satisfied if the Moon has set.

    Parameters
    ----------
    min, max : float or `None` (optional)
        Minimum/maximum acceptable fractional illumination (inclusive).
        `None` indicates no limit.
    ephemeris : str, optional
        Ephemeris to use.  If not given, use the one set with
        `~astropy.coordinates.solar_system_ephemeris` (which is set to
        'builtin' by default).
    """

    def __init__(self, min=None, max=None, ephemeris=None):
        self.min = min
        self.max = max
        self.ephemeris = ephemeris

    @classmethod
    def dark(cls, min=None, max=0.25, **kwargs):
        """
        Initialize a `~astroplan.constraints.MoonIlluminationConstraint`
        with defaults of no minimum and a maximum of 0.25.
        """
        return cls(min, max, **kwargs)

    @classmethod
    def grey(cls, min=0.25, max=0.65, **kwargs):
        """
        Initialize a `~astroplan.constraints.MoonIlluminationConstraint`
        with defaults of a minimum of 0.25 and a maximum of 0.65.
        """
        return cls(min, max, **kwargs)

    @classmethod
    def bright(cls, min=0.65, max=None, **kwargs):
        """
        Initialize a `~astroplan.constraints.MoonIlluminationConstraint`
        with defaults of a minimum of 0.65 and no maximum.
        """
        return cls(min, max, **kwargs)

    def compute_constraint(self, times, observer, targets):
        # first, is the moon up?
        cached_moon = _get_moon_data(times, observer)
        moon_alt = cached_moon['altaz'].alt
        moon_down_mask = moon_alt < 0
        moon_up_mask = moon_alt >= 0

        illumination = cached_moon['illum']
        if self.min is None and self.max is not None:
            mask = (self.max >= illumination) | moon_down_mask
        elif self.max is None and self.min is not None:
            mask = (self.min <= illumination) & moon_up_mask
        elif self.min is not None and self.max is not None:
            mask = ((self.min <= illumination) &
                    (illumination <= self.max)) & moon_up_mask
        else:
            raise ValueError("No max and/or min specified in "
                             "MoonIlluminationConstraint.")
        return mask
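

# Illustrative sketch: a conservative "dark time" constraint set; avoid the
# Galactic plane, the Sun and the Moon, and require a dark (or set) Moon.
# All thresholds are assumptions for the example, not recommendations.
def _example_dark_time_constraints():
    return [GalacticLatitudeConstraint(min=10 * u.deg),
            SunSeparationConstraint(min=40 * u.deg),
            MoonSeparationConstraint(min=45 * u.deg),
            MoonIlluminationConstraint.dark()]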


class LocalTimeConstraint(Constraint):
    """
    Constrain the observable hours.

    Parameters
    ----------
    min, max : `~datetime.time`
        Earliest/latest local time (inclusive). `None` indicates no limit.

    Examples
    --------
    Constrain the observations to targets that are observable between
    23:50 and 04:08 local time:

    >>> from astroplan import Observer
    >>> from astroplan.constraints import LocalTimeConstraint
    >>> import datetime as dt
    >>> subaru = Observer.at_site("Subaru", timezone="US/Hawaii")
    >>> # bound times between 23:50 and 04:08 local Hawaiian time
    >>> constraint = LocalTimeConstraint(min=dt.time(23,50), max=dt.time(4,8))
    """

    def __init__(self, min=None, max=None):
        self.min = min
        self.max = max

        if self.min is None and self.max is None:
            raise ValueError("You must at least supply either a minimum or "
                             "a maximum time.")
        if self.min is not None:
            if not isinstance(self.min, datetime.time):
                raise TypeError("Time limits must be specified as "
                                "datetime.time objects.")
        if self.max is not None:
            if not isinstance(self.max, datetime.time):
                raise TypeError("Time limits must be specified as "
                                "datetime.time objects.")

    def compute_constraint(self, times, observer, targets):
        timezone = None

        # get timezone from time objects, or from the observer
        if self.min is not None:
            timezone = self.min.tzinfo
        elif self.max is not None:
            timezone = self.max.tzinfo
        if timezone is None:
            timezone = observer.timezone

        if self.min is not None:
            min_time = self.min
        else:
            min_time = self.min = datetime.time(0, 0, 0)

        if self.max is not None:
            max_time = self.max
        else:
            max_time = self.max = datetime.time(23, 59, 59)

        # If time limits occur on same day:
        if min_time < max_time:
            try:
                mask = np.array([min_time <= t.time() <= max_time
                                 for t in times.datetime])
            except BaseException:
                # use np.bool so shape queries don't cause problems
                mask = np.bool_(min_time <= times.datetime.time() <= max_time)

        # If time boundaries straddle midnight:
        else:
            try:
                mask = np.array([(t.time() >= min_time) or
                                 (t.time() <= max_time)
                                 for t in times.datetime])
            except BaseException:
                mask = np.bool_((times.datetime.time() >= min_time) or
                                (times.datetime.time() <= max_time))
        return mask


class TimeConstraint(Constraint):
    """
    Constrain the observing time to be within certain time limits.

    An example use case for this class would be to associate an acceptable
    time range with a specific observing block.  This can be useful if not
    all observing blocks are valid over the time limits used in calls to
    `is_observable` or `is_always_observable`.

    Parameters
    ----------
    min, max : `~astropy.time.Time`
        Earliest/latest time (inclusive). `None` indicates no limit.

    Examples
    --------
    Constrain the observations to targets that are observable between
    2016-03-28 and 2016-03-30:

    >>> from astroplan import Observer
    >>> from astropy.time import Time
    >>> subaru = Observer.at_site("Subaru")
    >>> t1 = Time("2016-03-28T12:00:00")
    >>> t2 = Time("2016-03-30T12:00:00")
    >>> constraint = TimeConstraint(t1, t2)
    """

    def __init__(self, min=None, max=None):
        self.min = min
        self.max = max

        if self.min is None and self.max is None:
            raise ValueError("You must at least supply either a minimum or "
                             "a maximum time.")
        if self.min is not None:
            if not isinstance(self.min, Time):
                raise TypeError("Time limits must be specified as "
                                "astropy.time.Time objects.")
        if self.max is not None:
            if not isinstance(self.max, Time):
                raise TypeError("Time limits must be specified as "
                                "astropy.time.Time objects.")

    def compute_constraint(self, times, observer, targets):
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            min_time = Time("1950-01-01T00:00:00") if self.min is None else self.min
            max_time = Time("2120-01-01T00:00:00") if self.max is None else self.max
        mask = np.logical_and(times > min_time, times < max_time)
        return mask
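

# Illustrative sketch: tie an observing block to a calendar window and to
# late-night local hours.  The dates and hours below are assumptions chosen
# for the example.
def _example_time_window_constraints():
    import datetime as dt

    return [TimeConstraint(Time("2016-03-28T12:00:00"),
                           Time("2016-03-30T12:00:00")),
            LocalTimeConstraint(min=dt.time(23, 50), max=dt.time(4, 8))]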


class PrimaryEclipseConstraint(Constraint):
    """
    Constrain observations to times during primary eclipse.

    Parameters
    ----------
    eclipsing_system : `~astroplan.periodic.EclipsingSystem`
        System which must be in primary eclipse.
    """

    def __init__(self, eclipsing_system):
        self.eclipsing_system = eclipsing_system

    def compute_constraint(self, times, observer=None, targets=None):
        mask = self.eclipsing_system.in_primary_eclipse(times)
        return mask


class SecondaryEclipseConstraint(Constraint):
    """
    Constrain observations to times during secondary eclipse.

    Parameters
    ----------
    eclipsing_system : `~astroplan.periodic.EclipsingSystem`
        System which must be in secondary eclipse.
    """

    def __init__(self, eclipsing_system):
        self.eclipsing_system = eclipsing_system

    def compute_constraint(self, times, observer=None, targets=None):
        mask = self.eclipsing_system.in_secondary_eclipse(times)
        return mask


class PhaseConstraint(Constraint):
    """
    Constrain observations to times in some range of phases for a periodic
    event (e.g. transiting exoplanets, eclipsing binaries).

    Parameters
    ----------
    periodic_event : `~astroplan.periodic.PeriodicEvent` or subclass
        System on which to compute the phase.  For example, the system could
        be an eclipsing or non-eclipsing binary, or exoplanet system.
    min : float (optional)
        Minimum phase (inclusive) on interval [0, 1). Default is zero.
    max : float (optional)
        Maximum phase (inclusive) on interval [0, 1). Default is one.

    Examples
    --------
    To constrain observations on orbital phases between 0.4 and 0.6,

    >>> from astroplan import PeriodicEvent
    >>> from astropy.time import Time
    >>> import astropy.units as u
    >>> binary = PeriodicEvent(epoch=Time('2017-01-01 02:00'), period=1*u.day)
    >>> constraint = PhaseConstraint(binary, min=0.4, max=0.6)

    The minimum and maximum phase must be described on the interval [0, 1).
    To constrain observations on orbital phases between 0.6 and 1.2, for
    example, you should subtract one from the second number:

    >>> constraint = PhaseConstraint(binary, min=0.6, max=0.2)
    """

    def __init__(self, periodic_event, min=None, max=None):
        self.periodic_event = periodic_event
        if min is not None and ((min < 0) or (min > 1)):
            raise ValueError('The minimum phase of the PhaseConstraint must '
                             'be within the interval [0, 1).')
        if max is not None and ((max < 0) or (max > 1)):
            raise ValueError('The maximum phase of the PhaseConstraint must '
                             'be within the interval [0, 1).')
        self.min = min if min is not None else 0.0
        self.max = max if max is not None else 1.0

    def compute_constraint(self, times, observer=None, targets=None):
        phase = self.periodic_event.phase(times)

        mask = np.where(self.max > self.min,
                        (phase >= self.min) & (phase <= self.max),
                        (phase >= self.min) | (phase <= self.max))
        return mask


def is_always_observable(constraints, observer, targets, times=None,
                         time_range=None, time_grid_resolution=0.5*u.hour):
    """
    A function to determine whether ``targets`` are always observable
    throughout ``time_range`` given constraints in the ``constraints_list``
    for a particular ``observer``.

    Parameters
    ----------
    constraints : list or `~astroplan.constraints.Constraint`
        Observational constraint(s)
    observer : `~astroplan.Observer`
        The observer who has constraints ``constraints``
    targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`}
        Target or list of targets
    times : `~astropy.time.Time` (optional)
        Array of times on which to test the constraint
    time_range : `~astropy.time.Time` (optional)
        Lower and upper bounds on time sequence, with spacing
        ``time_resolution``.  This will be passed as the first argument into
        `~astroplan.time_grid_from_range`.
    time_grid_resolution : `~astropy.units.Quantity` (optional)
        If ``time_range`` is specified, determine whether constraints are met
        between test times in ``time_range`` by checking the constraint at
        linearly-spaced times separated by ``time_resolution``.  Default is
        0.5 hours.

    Returns
    -------
    always_observable : list
        List of booleans of same length as ``targets`` for whether or not
        each target is observable in the time range given the constraints.
    """
    if not hasattr(constraints, '__len__'):
        constraints = [constraints]

    applied_constraints = [constraint(observer, targets, times=times,
                                      time_range=time_range,
                                      time_grid_resolution=time_grid_resolution,
                                      grid_times_targets=True)
                           for constraint in constraints]
    constraint_arr = np.logical_and.reduce(applied_constraints)
    return np.all(constraint_arr, axis=1)


def is_observable(constraints, observer, targets, times=None,
                  time_range=None, time_grid_resolution=0.5*u.hour):
    """
    Determines if the ``targets`` are observable during ``time_range`` given
    constraints in ``constraints_list`` for a particular ``observer``.

    Parameters are as for `is_always_observable`.

    Returns
    -------
    ever_observable : list
        List of booleans of same length as ``targets`` for whether or not
        each target is ever observable in the time range given the
        constraints.
    """
    if not hasattr(constraints, '__len__'):
        constraints = [constraints]

    applied_constraints = [constraint(observer, targets, times=times,
                                      time_range=time_range,
                                      time_grid_resolution=time_grid_resolution,
                                      grid_times_targets=True)
                           for constraint in constraints]
    constraint_arr = np.logical_and.reduce(applied_constraints)
    return np.any(constraint_arr, axis=1)
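

# Illustrative sketch: evaluate one constraint list over a single night for
# two targets.  The site, targets and night below are assumptions for the
# example.
def _example_observability_checks():
    from astroplan import Observer, FixedTarget

    observer = Observer.at_site("Kitt Peak")
    targets = [FixedTarget(coord=SkyCoord(ra=10.68 * u.deg, dec=41.27 * u.deg),
                           name="M31"),
               FixedTarget(coord=SkyCoord(ra=83.82 * u.deg, dec=-5.39 * u.deg),
                           name="M42")]
    constraints = [AtNightConstraint.twilight_civil(),
                   AirmassConstraint(max=2)]
    time_range = Time(["2017-01-01 00:00:00", "2017-01-02 00:00:00"])

    ever = is_observable(constraints, observer, targets, time_range=time_range)
    always = is_always_observable(constraints, observer, targets,
                                  time_range=time_range)
    return ever, always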


def is_event_observable(constraints, observer, target, times=None,
                        times_ingress_egress=None):
    """
    Determines if the ``target`` is observable at each time in ``times``,
    given constraints in ``constraints`` for a particular ``observer``.

    Parameters
    ----------
    constraints : list or `~astroplan.constraints.Constraint`
        Observational constraint(s)
    observer : `~astroplan.Observer`
        The observer who has constraints ``constraints``
    target : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`}
        Target
    times : `~astropy.time.Time` (optional)
        Array of mid-event times on which to test the constraints
    times_ingress_egress : `~astropy.time.Time` (optional)
        Array of ingress and egress times for ``N`` events, with shape
        (``N``, 2).

    Returns
    -------
    event_observable : `~numpy.ndarray`
        Array of booleans of same length as ``times`` for whether or not the
        target is ever observable at each time, given the constraints.
    """
    if not hasattr(constraints, '__len__'):
        constraints = [constraints]

    if times is not None:
        applied_constraints = [constraint(observer, target, times=times,
                                          grid_times_targets=True)
                               for constraint in constraints]
        constraint_arr = np.logical_and.reduce(applied_constraints)

    else:
        times_ing = times_ingress_egress[:, 0]
        times_egr = times_ingress_egress[:, 1]
        applied_constraints_ing = [constraint(observer, target,
                                              times=times_ing,
                                              grid_times_targets=True)
                                   for constraint in constraints]
        applied_constraints_egr = [constraint(observer, target,
                                              times=times_egr,
                                              grid_times_targets=True)
                                   for constraint in constraints]

        constraint_arr = np.logical_and(
            np.logical_and.reduce(applied_constraints_ing),
            np.logical_and.reduce(applied_constraints_egr))
    return constraint_arr


def months_observable(constraints, observer, targets,
                      time_range=_current_year_time_range,
                      time_grid_resolution=0.5*u.hour):
    """
    Determines which months the specified ``targets`` are observable for a
    specific ``observer``, given the supplied ``constraints``.

    Parameters
    ----------
    constraints : list or `~astroplan.constraints.Constraint`
        Observational constraint(s)
    observer : `~astroplan.Observer`
        The observer who has constraints ``constraints``
    targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`}
        Target or list of targets
    time_range : `~astropy.time.Time` (optional)
        Lower and upper bounds on time sequence.  If not specified, defaults
        to the current year (localtime).
    time_grid_resolution : `~astropy.units.Quantity` (optional)
        Time-grid spacing.  Default is 0.5 hours.

    Returns
    -------
    observable_months : list
        List of sets of unique integers representing each month that a
        target is observable, one set per target.  These integers are
        1-based so that January maps to 1, February maps to 2, etc.
    """
    # TODO: This method could be sped up a lot by dropping to the
    # trigonometric altitude calculations.
    if not hasattr(constraints, '__len__'):
        constraints = [constraints]

    times = time_grid_from_range(time_range, time_grid_resolution)

    applied_constraints = [constraint(observer, targets, times=times,
                                      grid_times_targets=True)
                           for constraint in constraints]
    constraint_arr = np.logical_and.reduce(applied_constraints)

    months_observable = []
    for target, observable in zip(targets, constraint_arr):
        s = set([t.datetime.month for t in times[observable]])
        months_observable.append(s)

    return months_observable


def observability_table(constraints, observer, targets, times=None,
                        time_range=None, time_grid_resolution=0.5*u.hour):
    """
    Creates a table with information about observability for all the
    ``targets`` over the requested ``time_range``, given the constraints in
    ``constraints_list`` for ``observer``.

    Parameters are as for `is_always_observable`, except:

    time_range : `~astropy.time.Time` (optional)
        Lower and upper bounds on time sequence.  If a single (scalar) time,
        the table will be for a 24 hour period centered on that time.

    Returns
    -------
    observability_table : `~astropy.table.Table`
        A Table containing the observability information for each of the
        ``targets``.  The table contains four columns with information about
        the target and its observability: ``'target name'``,
        ``'ever observable'``, ``'always observable'``, and
        ``'fraction of time observable'``.  The column ``'time observable'``
        will also be present if the ``time_range`` is given as a scalar.
        It also contains metadata entries ``'times'`` (with an array of all
        the times), ``'observer'`` (the `~astroplan.Observer` object), and
        ``'constraints'`` (containing the supplied ``constraints``).
    """
    if not hasattr(constraints, '__len__'):
        constraints = [constraints]

    is_24hr_table = False
    if hasattr(time_range, 'isscalar') and time_range.isscalar:
        time_range = (time_range - 12*u.hour, time_range + 12*u.hour)
        is_24hr_table = True

    applied_constraints = [constraint(observer, targets, times=times,
                                      time_range=time_range,
                                      time_grid_resolution=time_grid_resolution,
                                      grid_times_targets=True)
                           for constraint in constraints]
    constraint_arr = np.logical_and.reduce(applied_constraints)

    colnames = ['target name', 'ever observable', 'always observable',
                'fraction of time observable']

    target_names = [target.name for target in targets]
    ever_obs = np.any(constraint_arr, axis=1)
    always_obs = np.all(constraint_arr, axis=1)
    frac_obs = np.sum(constraint_arr, axis=1) / constraint_arr.shape[1]

    tab = table.Table(names=colnames,
                      data=[target_names, ever_obs, always_obs, frac_obs])

    if times is None and time_range is not None:
        times = time_grid_from_range(time_range,
                                     time_resolution=time_grid_resolution)

    if is_24hr_table:
        tab['time observable'] = tab['fraction of time observable'] * 24*u.hour

    tab.meta['times'] = times.datetime
    tab.meta['observer'] = observer
    tab.meta['constraints'] = constraints

    return tab
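

# Illustrative sketch: tabulate observability over a single night and list
# the calendar months in which the same target clears the constraints.  A
# coarse 3-hour grid keeps the year-long month scan cheap; the site, target
# and night are assumptions for the example.
def _example_observability_summary():
    from astroplan import Observer, FixedTarget

    observer = Observer.at_site("Subaru")
    targets = [FixedTarget(coord=SkyCoord(ra=10.68 * u.deg, dec=41.27 * u.deg),
                           name="M31")]
    constraints = [AtNightConstraint.twilight_astronomical(),
                   AltitudeConstraint(min=30 * u.deg)]

    tab = observability_table(constraints, observer, targets,
                              time_range=Time(["2017-08-01 06:00:00",
                                               "2017-08-01 18:00:00"]))
    months = months_observable(constraints, observer, targets,
                               time_grid_resolution=3 * u.hour)
    return tab, months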


class Constraint(object):
    """
    Abstract class for objects defining observational constraints.
    """
    __metaclass__ = ABCMeta

    def __call__(self, observer, targets, times=None,
                 time_range=None, time_grid_resolution=0.5*u.hour,
                 grid_times_targets=False):
        """
        Compute the constraint for this class.

        Parameters
        ----------
        observer : `~astroplan.Observer`
            the observation location from which to apply the constraints
        targets : sequence of `~astroplan.Target`
            The targets on which to apply the constraints.
        times : `~astropy.time.Time`
            The times to compute the constraint.  If both ``times`` and
            ``time_range`` are given, ``times`` takes precedence and
            ``time_range`` is ignored.
        time_range : `~astropy.time.Time` (length = 2)
            Lower and upper bounds on time sequence.
        time_grid_resolution : `~astropy.units.Quantity`
            Time-grid spacing
        grid_times_targets : bool
            if True, grids the constraint result with targets along the first
            index and times along the second. Otherwise, we rely on
            broadcasting the shapes together using standard numpy rules.

        Returns
        -------
        constraint_result : 1D or 2D array of float or bool
            The constraints, with targets along the first index and times
            along the second.
        """
        if times is None and time_range is not None:
            times = time_grid_from_range(time_range,
                                         time_resolution=time_grid_resolution)

        if grid_times_targets:
            targets = get_skycoord(targets)
            # TODO: these broadcasting operations are relatively slow
            # but there is potential for huge speedup if the end user
            # disables gridding and re-shapes the coords themselves
            # prior to evaluating multiple constraints.
            if targets.isscalar:
                # ensure we have a (1, 1) shape coord
                targets = SkyCoord(np.tile(targets, 1))[:, np.newaxis]
            else:
                targets = targets[..., np.newaxis]

        times, targets = observer._preprocess_inputs(times, targets,
                                                     grid_times_targets=False)
        result = self.compute_constraint(times, observer, targets)

        # make sure the output has the same shape as would result from
        # broadcasting times and targets against each other
        if targets is not None:
            # broadcasting times v targets is slow due to the complex nature
            # of these objects. We convert them to simple numpy arrays of
            # the same shape and broadcast these to find the correct shape
            shp1, shp2 = times.shape, targets.shape
            x = np.array([1])
            a = as_strided(x, shape=shp1, strides=[0] * len(shp1))
            b = as_strided(x, shape=shp2, strides=[0] * len(shp2))
            output_shape = np.broadcast(a, b).shape
            if output_shape != np.array(result).shape:
                result = np.broadcast_to(result, output_shape)

        return result

    @abstractmethod
    def compute_constraint(self, times, observer, targets):
        """
        Actually do the real work of computing the constraint.  Subclasses
        override this.

        Parameters
        ----------
        times : `~astropy.time.Time`
            The times to compute the constraint
        observer : `~astroplan.Observer`
            the observation location from which to apply the constraints
        targets : sequence of `~astroplan.Target`
            The targets on which to apply the constraints.

        Returns
        -------
        constraint_result : 1D or 2D array of float or bool
            The constraints. If 2D, with targets along the first index and
            times along the second.
        """
        # Should be implemented on each subclass of Constraint
        raise NotImplementedError
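

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of astroplan): a new constraint only needs to
# implement ``compute_constraint``; ``Constraint.__call__`` handles the time
# grid and broadcasting.  ``_ExampleDecConstraint`` is a hypothetical name
# used here for demonstration only.
def _example_custom_constraint():
    import astropy.units as u

    class _ExampleDecConstraint(Constraint):
        """Toy constraint: require a declination of at least ``min_dec``."""

        def __init__(self, min_dec=0 * u.deg):
            self.min_dec = min_dec

        def compute_constraint(self, times, observer, targets):
            # ``targets`` is a SkyCoord here; the boolean result is
            # broadcast against ``times`` by ``Constraint.__call__``.
            return targets.icrs.dec >= self.min_dec

    return _ExampleDecConstraint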
\"\"\" def __init__(self, min=None, max=None, ephemeris=None): \"\"\" Parameters", "\"SecondaryEclipseConstraint\", \"Constraint\", \"TimeConstraint\", \"observability_table\", \"months_observable\", \"max_best_rescale\", \"min_best_rescale\", \"PhaseConstraint\", \"is_event_observable\"] _current_year", "and self.max is None: raise ValueError(\"You must at least supply", "as a scalar. It also contains metadata entries ``'times'`` (with", "Target or list of targets. Returns ------- cache_key : tuple", "0.4 and 0.6, >>> from astroplan import PeriodicEvent >>> from", "The table contains four columns with information about the target", "constraints in ``constraints_list`` for a particular ``observer``. Parameters ---------- constraints", "# Licensed under a 3-clause BSD style license - see", "ingress and egress times for ``N`` events, with shape (``N``,", "mask = self.eclipsing_system.in_secondary_eclipse(times) return mask class PhaseConstraint(Constraint): \"\"\" Constrain observations", "by ``time_resolution``. Default is 0.5 hours. Returns ------- observable_months :", "and the ``min_val`` goes to zero. Parameters ---------- vals :", "targets that are observable between 2016-03-28 and 2016-03-30: >>> from", "is GCRS, and that is what we want. moon_separation =", "[0, 1).') self.min = min if min is not None", "an array of times for ``observer``. Cache the result on", "mask = self.min <= moon_separation elif self.min is not None", "small <1 deg # innacuracies, but it is needed until", ">>> constraint = LocalTimeConstraint(min=dt.time(23,50), max=dt.time(4,8)) \"\"\" self.min = min self.max", "'get_sun' returns ICRS coords. sun = get_body('sun', times, location=observer.location) solar_separation", "else self.min # values below 1 should be disregarded return", "(solar_separation <= self.max)) else: raise ValueError(\"No max and/or min specified", "equal 1 Examples -------- rescale an array of altitudes to", "finally: if self.force_pressure_zero: observer.pressure = observer_old_pressure else: altitude = observer._altaz_cache[aakey]['altitude']", ": dict Dictionary containing three key-value pairs. (1) 'times' contains", "In the current implementation the airmass is approximated by the", "with ``astropy.coordinates.solar_system_ephemeris.set`` (which is set to 'builtin' by default). \"\"\"", "times, observer, targets): with warnings.catch_warnings(): warnings.simplefilter('ignore') min_time = Time(\"1950-01-01T00:00:00\") if", "= True applied_constraints = [constraint(observer, targets, times=times, time_range=time_range, time_grid_resolution=time_grid_resolution, grid_times_targets=True)", "<= solar_separation) & (solar_separation <= self.max)) else: raise ValueError(\"No max", "targets = SkyCoord(np.tile(targets, 1))[:, np.newaxis] else: targets = targets[..., np.newaxis]", "self.min.tzinfo elif self.max is not None: timezone = self.max.tzinfo if", "``min_val`` equal 0 and those equal to ``max_val`` equal 1", "time range with a specific observing block. This can be", "cls(max_solar_altitude=-6*u.deg, **kwargs) @classmethod def twilight_nautical(cls, **kwargs): \"\"\" Consider nighttime as", "'fraction of time observable'] target_names = [target.name for target in", "times=None, time_range=None, time_grid_resolution=0.5*u.hour, grid_times_targets=False): \"\"\" Compute the constraint for this", "``N`` events, with shape (``N``, 2). 
Returns ------- event_observable :", "note:: The ``max`` and ``min`` arguments appear in the order", "these to find the correct shape shp1, shp2 = times.shape,", "tab.meta['observer'] = observer tab.meta['constraints'] = constraints return tab def min_best_rescale(vals,", "min self.max = max def compute_constraint(self, times, observer, targets): #", "not given, use the one set with ``astropy.coordinates.solar_system_ephemeris.set`` (which is", "float or bool The constraints, with targets along the first", "self.max is not None: max_time = self.max else: max_time =", "coords try: if hasattr(targets, 'frame'): # treat as a SkyCoord", "than ``min_val`` should also return one, in some cases it", "LocalTimeConstraint(Constraint): \"\"\" Constrain the observable hours. \"\"\" def __init__(self, min=None,", "care about the upper limit on the airmass (``max``) and", ">>> from astropy.time import Time >>> subaru = Observer.at_site(\"Subaru\") >>>", "<= times.datetime.time() <= max_time) # If time boundaries straddle midnight:", ": `~astropy.units.Quantity` or `None` Minimum altitude of the target (inclusive).", "`~astropy.time.Time` (length = 2) Lower and upper bounds on time", "def compute_constraint(self, times, observer, targets): cached_altaz = _get_altaz(times, observer, targets)", "# needed for backward compatibility _current_year_time_range = Time( # needed", "targets) secz = cached_altaz['altaz'].secz.value if self.boolean_constraint: if self.min is None", ": bool (optional) Force the pressure to zero for solar", "is what we want. moon_separation = moon.separation(targets) if self.min is", "= [constraint(observer, target, times=times_egr, grid_times_targets=True) for constraint in constraints] constraint_arr", "bounds on time sequence If ``time_range`` is not specified, defaults", "\"observability_table\", \"months_observable\", \"max_best_rescale\", \"min_best_rescale\", \"PhaseConstraint\", \"is_event_observable\"] _current_year = time.localtime().tm_year #", "= 2) Lower and upper bounds on time sequence. time_grid_resolution", "times. \"\"\" if not hasattr(observer, '_altaz_cache'): observer._altaz_cache = {} #", "max, **kwargs) def compute_constraint(self, times, observer, targets): # first is", "timezone = self.min.tzinfo elif self.max is not None: timezone =", "name', 'ever observable', 'always observable', 'fraction of time observable'] target_names", "initialize a `~astroplan.constraints.MoonIlluminationConstraint` with defaults of no minimum and a", "\"\"\" if not hasattr(constraints, '__len__'): constraints = [constraints] applied_constraints =", "secz) & (secz <= self.max) else: raise ValueError(\"No max and/or", "Earth's moon. 
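

# Illustrative usage sketch (not part of the module).  Assumes astroplan is
# installed and IERS data are available; "Subaru" and the coordinates below
# are arbitrary example values.
def _example_airmass_usage():
    import astropy.units as u
    from astropy.time import Time
    from astropy.coordinates import SkyCoord
    from astroplan import Observer

    subaru = Observer.at_site("Subaru")
    target = SkyCoord(ra=10 * u.deg, dec=20 * u.deg)
    times = Time("2016-03-28 10:00") + np.linspace(0, 6, 13) * u.hour

    # boolean form: True wherever the airmass is better than 2
    constraint = AirmassConstraint(2)
    return constraint(subaru, target, times=times)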


class AtNightConstraint(Constraint):
    """
    Constrain the Sun to be below ``horizon``.
    """
    @u.quantity_input(max_solar_altitude=u.deg)
    def __init__(self, max_solar_altitude=0*u.deg, force_pressure_zero=True):
        """
        Parameters
        ----------
        max_solar_altitude : `~astropy.units.Quantity`
            The altitude of the sun below which it is considered to be
            "night" (inclusive).
        force_pressure_zero : bool (optional)
            Force the pressure to zero for solar altitude calculations. This
            avoids errors in the altitude of the Sun that can occur when the
            Sun is below the horizon and the corrections for atmospheric
            refraction return nonsense values.
        """
        self.max_solar_altitude = max_solar_altitude
        self.force_pressure_zero = force_pressure_zero

    @classmethod
    def twilight_civil(cls, **kwargs):
        """
        Consider nighttime as time between civil twilights (-6 degrees).
        """
        return cls(max_solar_altitude=-6*u.deg, **kwargs)

    @classmethod
    def twilight_nautical(cls, **kwargs):
        """
        Consider nighttime as time between nautical twilights (-12 degrees).
        """
        return cls(max_solar_altitude=-12*u.deg, **kwargs)

    @classmethod
    def twilight_astronomical(cls, **kwargs):
        """
        Consider nighttime as time between astronomical twilights (-18 degrees).
        """
        return cls(max_solar_altitude=-18*u.deg, **kwargs)

    def _get_solar_altitudes(self, times, observer, targets):
        if not hasattr(observer, '_altaz_cache'):
            observer._altaz_cache = {}

        aakey = _make_cache_key(times, 'sun')

        if aakey not in observer._altaz_cache:
            try:
                if self.force_pressure_zero:
                    observer_old_pressure = observer.pressure
                    observer.pressure = 0

                # find solar altitude at these times
                altaz = observer.altaz(times, get_sun(times))
                altitude = altaz.alt
                # cache the altitude
                observer._altaz_cache[aakey] = dict(times=times,
                                                    altitude=altitude)
            finally:
                if self.force_pressure_zero:
                    observer.pressure = observer_old_pressure
        else:
            altitude = observer._altaz_cache[aakey]['altitude']

        return altitude

    def compute_constraint(self, times, observer, targets):
        solar_altitude = self._get_solar_altitudes(times, observer, targets)
        mask = solar_altitude <= self.max_solar_altitude
        return mask


class GalacticLatitudeConstraint(Constraint):
    """
    Constrain the distance between the Galactic plane and some targets.
    """
    def __init__(self, min=None, max=None):
        """
        Parameters
        ----------
        min : `~astropy.units.Quantity` or `None` (optional)
            Minimum acceptable Galactic latitude of target (inclusive).
            `None` indicates no limit.
        max : `~astropy.units.Quantity` or `None` (optional)
            Maximum acceptable Galactic latitude of target (inclusive).
            `None` indicates no limit.
        """
        self.min = min
        self.max = max

    def compute_constraint(self, times, observer, targets):
        separation = abs(targets.transform_to(Galactic).b)

        if self.min is None and self.max is not None:
            mask = self.max >= separation
        elif self.max is None and self.min is not None:
            mask = self.min <= separation
        elif self.min is not None and self.max is not None:
            mask = ((self.min <= separation) & (separation <= self.max))
        else:
            raise ValueError("No max and/or min specified in "
                             "GalacticLatitudeConstraint.")
        return mask


class SunSeparationConstraint(Constraint):
    """
    Constrain the distance between the Sun and some targets.
    """
    def __init__(self, min=None, max=None):
        """
        Parameters
        ----------
        min : `~astropy.units.Quantity` or `None` (optional)
            Minimum acceptable separation between Sun and target (inclusive).
            `None` indicates no limit.
        max : `~astropy.units.Quantity` or `None` (optional)
            Maximum acceptable separation between Sun and target (inclusive).
            `None` indicates no limit.
        """
        self.min = min
        self.max = max

    def compute_constraint(self, times, observer, targets):
        # use get_body rather than get_sun here, since it returns the Sun's
        # coordinates in an observer centred frame, so the separation is
        # as-seen by the observer.
        # 'get_sun' returns ICRS coords.
        sun = get_body('sun', times, location=observer.location)
        solar_separation = sun.separation(targets)

        if self.min is None and self.max is not None:
            mask = self.max >= solar_separation
        elif self.max is None and self.min is not None:
            mask = self.min <= solar_separation
        elif self.min is not None and self.max is not None:
            mask = ((self.min <= solar_separation) &
                    (solar_separation <= self.max))
        else:
            raise ValueError("No max and/or min specified in "
                             "SunSeparationConstraint.")
        return mask


class MoonSeparationConstraint(Constraint):
    """
    Constrain the distance between the Earth's moon and some targets.
    """
    def __init__(self, min=None, max=None, ephemeris=None):
        """
        Parameters
        ----------
        min : `~astropy.units.Quantity` or `None` (optional)
            Minimum acceptable separation between moon and target (inclusive).
            `None` indicates no limit.
        max : `~astropy.units.Quantity` or `None` (optional)
            Maximum acceptable separation between moon and target (inclusive).
            `None` indicates no limit.
        ephemeris : str, optional
            Ephemeris to use.  If not given, use the one set with
            ``astropy.coordinates.solar_system_ephemeris.set`` (which is
            set to 'builtin' by default).
        """
        self.min = min
        self.max = max
        self.ephemeris = ephemeris

    def compute_constraint(self, times, observer, targets):
        # removed the location argument here, which causes small <1 deg
        # inaccuracies, but it is needed until astropy PR #5897 is released
        # which should be astropy 1.3.2
        moon = get_moon(times, ephemeris=self.ephemeris)
        # note to future editors - the order matters here
        # moon.separation(targets) is NOT the same as targets.separation(moon)
        # the former calculates the separation in the frame of the moon coord
        # which is GCRS, and that is what we want.
        moon_separation = moon.separation(targets)

        if self.min is None and self.max is not None:
            mask = self.max >= moon_separation
        elif self.max is None and self.min is not None:
            mask = self.min <= moon_separation
        elif self.min is not None and self.max is not None:
            mask = ((self.min <= moon_separation) &
                    (moon_separation <= self.max))
        else:
            raise ValueError("No max and/or min specified in "
                             "MoonSeparationConstraint.")
        return mask


class MoonIlluminationConstraint(Constraint):
    """
    Constrain the fractional illumination of the Earth's moon.

    Constraint is also satisfied if the Moon has set.
    """
    def __init__(self, min=None, max=None, ephemeris=None):
        """
        Parameters
        ----------
        min : float or `None` (optional)
            Minimum acceptable fractional illumination (inclusive).
            `None` indicates no limit.
        max : float or `None` (optional)
            Maximum acceptable fractional illumination (inclusive).
            `None` indicates no limit.
        ephemeris : str, optional
            Ephemeris to use.  If not given, use the one set with
            `~astropy.coordinates.solar_system_ephemeris` (which is set to
            'builtin' by default).
        """
        self.min = min
        self.max = max
        self.ephemeris = ephemeris

    @classmethod
    def dark(cls, min=None, max=0.25, **kwargs):
        """
        initialize a `~astroplan.constraints.MoonIlluminationConstraint`
        with defaults of no minimum and a maximum of 0.25
        """
        return cls(min, max, **kwargs)

    @classmethod
    def grey(cls, min=0.25, max=0.65, **kwargs):
        """
        initialize a `~astroplan.constraints.MoonIlluminationConstraint`
        with defaults of a minimum of 0.25 and a maximum of 0.65
        """
        return cls(min, max, **kwargs)

    @classmethod
    def bright(cls, min=0.65, max=None, **kwargs):
        """
        initialize a `~astroplan.constraints.MoonIlluminationConstraint`
        with defaults of a minimum of 0.65 and no maximum
        """
        return cls(min, max, **kwargs)

    def compute_constraint(self, times, observer, targets):
        # first is the moon up?
        cached_moon = _get_moon_data(times, observer)
        moon_alt = cached_moon['altaz'].alt
        moon_down_mask = moon_alt < 0
        moon_up_mask = moon_alt >= 0

        illumination = cached_moon['illum']
        if self.min is None and self.max is not None:
            mask = (self.max >= illumination) | moon_down_mask
        elif self.max is None and self.min is not None:
            mask = (self.min <= illumination) & moon_up_mask
        elif self.min is not None and self.max is not None:
            mask = ((self.min <= illumination) &
                    (illumination <= self.max)) & moon_up_mask
        else:
            raise ValueError("No max and/or min specified in "
                             "MoonIlluminationConstraint.")
        return mask
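

# Illustrative sketch (not part of the module): the moon constraints compose
# like any others.  The 30 degree separation and the "dark" preset are
# arbitrary example values.
def _example_moon_constraints():
    import astropy.units as u

    return [
        MoonSeparationConstraint(min=30 * u.deg),
        # factory equivalent to MoonIlluminationConstraint(max=0.25)
        MoonIlluminationConstraint.dark(),
    ]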


class LocalTimeConstraint(Constraint):
    """
    Constrain the observable hours.
    """
    def __init__(self, min=None, max=None):
        """
        Parameters
        ----------
        min : `~datetime.time`
            Earliest local time (inclusive). `None` indicates no limit.
        max : `~datetime.time`
            Latest local time (inclusive). `None` indicates no limit.

        Examples
        --------
        Constrain the observations to targets that are observable between
        23:50 and 04:08 local time:

        >>> from astroplan import Observer
        >>> from astroplan.constraints import LocalTimeConstraint
        >>> import datetime as dt
        >>> subaru = Observer.at_site("Subaru", timezone="US/Hawaii")
        >>> # bound times between 23:50 and 04:08 local Hawaiian time
        >>> constraint = LocalTimeConstraint(min=dt.time(23,50), max=dt.time(4,8))
        """
        self.min = min
        self.max = max

        if self.min is None and self.max is None:
            raise ValueError("You must at least supply either a minimum or "
                             "a maximum time.")

        if self.min is not None:
            if not isinstance(self.min, datetime.time):
                raise TypeError("Time limits must be specified as "
                                "datetime.time objects.")

        if self.max is not None:
            if not isinstance(self.max, datetime.time):
                raise TypeError("Time limits must be specified as "
                                "datetime.time objects.")

    def compute_constraint(self, times, observer, targets):
        timezone = None

        # get timezone from time objects, or from observer
        if self.min is not None:
            timezone = self.min.tzinfo
        elif self.max is not None:
            timezone = self.max.tzinfo

        if timezone is None:
            timezone = observer.timezone

        if self.min is not None:
            min_time = self.min
        else:
            min_time = datetime.time(0, 0, 0)

        if self.max is not None:
            max_time = self.max
        else:
            max_time = datetime.time(23, 59, 59)

        # If time limits occur on same day:
        if min_time < max_time:
            try:
                mask = np.array([min_time <= t.time() <= max_time
                                 for t in times.datetime])
            except BaseException:
                # use np.bool so shape queries don't cause problems
                mask = np.bool_(min_time <= times.datetime.time() <= max_time)

        # If time boundaries straddle midnight:
        else:
            try:
                mask = np.array([(t.time() >= min_time) or
                                 (t.time() <= max_time)
                                 for t in times.datetime])
            except BaseException:
                mask = np.bool_((times.datetime.time() >= min_time) or
                                (times.datetime.time() <= max_time))
        return mask


class TimeConstraint(Constraint):
    """Constrain the observing time to be within certain time limits.

    An example use case for this class would be to associate an acceptable
    time range with a specific observing block. This can be useful if not
    all observing blocks are valid over the time limits used in calls to
    `is_observable` or `is_always_observable`.
    """
    def __init__(self, min=None, max=None):
        """
        Parameters
        ----------
        min : `~astropy.time.Time`
            Earliest time (inclusive). `None` indicates no limit.
        max : `~astropy.time.Time`
            Latest time (inclusive). `None` indicates no limit.

        Examples
        --------
        Constrain the observations to targets that are observable between
        2016-03-28 and 2016-03-30:

        >>> from astroplan import Observer
        >>> from astropy.time import Time
        >>> subaru = Observer.at_site("Subaru")
        >>> t1 = Time("2016-03-28T12:00:00")
        >>> t2 = Time("2016-03-30T12:00:00")
        >>> constraint = TimeConstraint(t1,t2)
        """
        self.min = min
        self.max = max

        if self.min is None and self.max is None:
            raise ValueError("You must at least supply either a minimum or "
                             "a maximum time.")

        if self.min is not None:
            if not isinstance(self.min, Time):
                raise TypeError("Time limits must be specified as "
                                "astropy.time.Time objects.")

        if self.max is not None:
            if not isinstance(self.max, Time):
                raise TypeError("Time limits must be specified as "
                                "astropy.time.Time objects.")

    def compute_constraint(self, times, observer, targets):
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            min_time = Time("1950-01-01T00:00:00") if self.min is None else self.min
            max_time = Time("2120-01-01T00:00:00") if self.max is None else self.max
        mask = np.logical_and(times > min_time, times < max_time)
        return mask
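

# Illustrative sketch (not part of the module): bounding observations both by
# local clock time and by an absolute date range.  The specific times below
# are arbitrary example values.
def _example_time_window():
    import datetime as dt
    from astropy.time import Time

    return [
        LocalTimeConstraint(min=dt.time(23, 50), max=dt.time(4, 8)),
        TimeConstraint(Time("2016-03-28T12:00:00"),
                       Time("2016-03-30T12:00:00")),
    ]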


class PrimaryEclipseConstraint(Constraint):
    """
    Constrain observations to times during primary eclipse.
    """
    def __init__(self, eclipsing_system):
        """
        Parameters
        ----------
        eclipsing_system : `~astroplan.periodic.EclipsingSystem`
            System which must be in primary eclipse.
        """
        self.eclipsing_system = eclipsing_system

    def compute_constraint(self, times, observer=None, targets=None):
        mask = self.eclipsing_system.in_primary_eclipse(times)
        return mask


class SecondaryEclipseConstraint(Constraint):
    """
    Constrain observations to times during secondary eclipse.
    """
    def __init__(self, eclipsing_system):
        """
        Parameters
        ----------
        eclipsing_system : `~astroplan.periodic.EclipsingSystem`
            System which must be in secondary eclipse.
        """
        self.eclipsing_system = eclipsing_system

    def compute_constraint(self, times, observer=None, targets=None):
        mask = self.eclipsing_system.in_secondary_eclipse(times)
        return mask
\"\"\" def __init__(self, min=None, max=None): \"\"\"", "could be sped up a lot by dropping to the", "over the requested ``time_range``, given the constraints in ``constraints_list`` for", "strides=[0] * len(shp1)) b = as_strided(x, shape=shp2, strides=[0] * len(shp2))", "eclipse. \"\"\" self.eclipsing_system = eclipsing_system def compute_constraint(self, times, observer=None, targets=None):", "a target is observable, one set per target. These integers", "for constraint in constraints] constraint_arr = np.logical_and.reduce(applied_constraints) colnames = ['target", "& (separation <= self.max)) else: raise ValueError(\"No max and/or min", "grey(cls, min=0.25, max=0.65, **kwargs): \"\"\" initialize a `~astroplan.constraints.MoonIlluminationConstraint` with defaults", "who care about the upper limit on the airmass (``max``)", "a maximum of 0.65 Parameters ---------- min : float or", "'_moon_cache'): observer._moon_cache = {} # convert times to tuple for", "function to determine whether ``targets`` are always observable throughout ``time_range``", "by ``time_resolution``. Default is 0.5 hours. Returns ------- ever_observable :", "twilights (-18 degrees). \"\"\" return cls(max_solar_altitude=-18*u.deg, **kwargs) def _get_solar_altitudes(self, times,", "to times during primary eclipse. \"\"\" def __init__(self, eclipsing_system): \"\"\"", "subaru = Observer.at_site(\"Subaru\", timezone=\"US/Hawaii\") >>> # bound times between 23:50", "grid_times_targets=True) for constraint in constraints] constraint_arr = np.logical_and.reduce(applied_constraints) return np.any(constraint_arr,", "\"\"\" # Should be implemented on each subclass of Constraint", "for the alt/az computations, (2) 'altaz' contains the corresponding alt/az", "min if max is None: self.max = 90*u.deg else: self.max", "are observable for a specific ``observer``, given the supplied ``constraints``.", "target and it's observability: ``'target name'``, ``'ever observable'``, ``'always observable'``,", "= np.sum(constraint_arr, axis=1) / constraint_arr.shape[1] tab = table.Table(names=colnames, data=[target_names, ever_obs,", "mask = self.min <= separation elif self.min is not None", "float or bool The constraints. If 2D with targets along", "supply either a minimum or a maximum time.\") if self.min", "(inclusive). `None` indicates no limit. ephemeris : str, optional Ephemeris", "The altitude of the sun below which it is considered", "`~astroplan.constraints.MoonIlluminationConstraint` with defaults of no minimum and a maximum of", ": `~astroplan.Observer` The observer who has constraints ``constraints`` target :", "be in primary eclipse. \"\"\" self.eclipsing_system = eclipsing_system def compute_constraint(self,", "for ``observer``. Cache the result on the ``observer`` object. Parameters", "the correct shape shp1, shp2 = times.shape, targets.shape x =", "when the Sun is below the horizon and the corrections", "self.min is None and self.max is None: raise ValueError(\"You must", "Default is zero. max : float (optional) Maximum phase (inclusive)", "The minimum and maximum phase must be described on the", "the ``targets`` over the requested ``time_range``, given the constraints in", "axis=1) frac_obs = np.sum(constraint_arr, axis=1) / constraint_arr.shape[1] tab = table.Table(names=colnames,", "Make a unique key to reference this combination of ``times``", "mask class SecondaryEclipseConstraint(Constraint): \"\"\" Constrain observations to times during secondary", "should return 0 below 35 and 1 above 60. 
>>>", "= np.bool_((times.datetime.time() >= min_time) or (times.datetime.time() <= max_time)) return mask", "other if targets is not None: # broadcasting times v", "times and targets against each other if targets is not", "bounds on time sequence. time_grid_resolution : `~astropy.units.quantity` Time-grid spacing grid_times_targets", "of computing the constraint. Subclasses override this. Parameters ---------- times", "value cared about (rescales to 1) greater_than_max : 0 or", "altitude observer._altaz_cache[aakey] = dict(times=times, altitude=altitude) finally: if self.force_pressure_zero: observer.pressure =", "time. time_grid_resolution : `~astropy.units.Quantity` (optional) If ``time_range`` is specified, determine", "must at least supply either a minimum or a \"", "be specified as datetime.time objects.\") if self.max is not None:", "= Time(\"2016-03-28T12:00:00\") >>> t2 = Time(\"2016-03-30T12:00:00\") >>> constraint = TimeConstraint(t1,t2)", "periodic event (e.g.~transiting exoplanets, eclipsing binaries). \"\"\" def __init__(self, periodic_event,", "times, observer, targets): solar_altitude = self._get_solar_altitudes(times, observer, targets) mask =", "to test the constraint. targets : `~astropy.coordinates.SkyCoord` Target or list", "timezone = self.max.tzinfo if timezone is None: timezone = observer.timezone", "np.logical_and.reduce(applied_constraints_egr)) return constraint_arr def months_observable(constraints, observer, targets, time_range=_current_year_time_range, time_grid_resolution=0.5*u.hour): \"\"\"", "45, 55, 70]) >>> max_best_rescale(altitudes, 35, 60) # doctest: +FLOAT_CMP", "index and times along the second. \"\"\" if times is", "import numpy as np >>> airmasses = np.array([1, 1.5, 2,", "``'times'`` (with an array of all the times), ``'observer'`` (the", "single (scalar) time, the table will be for a 24", "observer_old_pressure = observer.pressure observer.pressure = 0 # find solar altitude", "assume targets is a string. targkey = (targets,) except BaseException:", "range given the constraints. \"\"\" if not hasattr(constraints, '__len__'): constraints", "((self.min <= moon_separation) & (moon_separation <= self.max)) else: raise ValueError(\"No", "`None` indicates no limit. \"\"\" return cls(min, max, **kwargs) def", "limits must be specified as \" \"astropy.time.Time objects.\") if self.max", "is ever observable in the time range given the constraints.", "no limit. max : `~datetime.time` Latest local time (inclusive). `None`", "\"time_grid_from_range\", \"GalacticLatitudeConstraint\", \"SunSeparationConstraint\", \"MoonSeparationConstraint\", \"MoonIlluminationConstraint\", \"LocalTimeConstraint\", \"PrimaryEclipseConstraint\", \"SecondaryEclipseConstraint\", \"Constraint\", \"TimeConstraint\",", "= self.max.tzinfo if timezone is None: timezone = observer.timezone if", "{list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target or list of targets times :", "also satisfied if the Moon has set. \"\"\" def __init__(self,", "limit. 
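

# Illustrative sketch (not part of the module) of the wrap-around convention:
# a phase window crossing zero (here 0.9 through 0.1) is expressed with
# min > max, so ``compute_constraint`` switches its mask from AND to OR.
def _example_phase_window(periodic_event):
    # accepts phases in [0.9, 1.0) and [0.0, 0.1]
    return PhaseConstraint(periodic_event, min=0.9, max=0.1)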
\"\"\" self.min = min self.max = max def compute_constraint(self,", "= ((self.min <= moon_separation) & (moon_separation <= self.max)) else: raise", "self.max is not None: mask = ((self.min <= solar_separation) &", "and self.max is not None: mask = ((self.min <= moon_separation)", "observer.pressure observer.pressure = 0 altaz = observer.moon_altaz(times) illumination = np.array(moon_illumination(times))", "rescaled def max_best_rescale(vals, min_val, max_val, greater_than_max=1): \"\"\" rescales an input", "AirmassConstraint(AltitudeConstraint): \"\"\" Constrain the airmass of a target. In the", "compute_constraint(self, times, observer, targets): solar_altitude = self._get_solar_altitudes(times, observer, targets) mask", "is None: raise ValueError(\"Cannot have a float AirmassConstraint if max", "(optional) Minimum phase (inclusive) on interval [0, 1). Default is", "time_grid_resolution=0.5*u.hour): \"\"\" Creates a table with information about observability for", "force_pressure_zero @classmethod def twilight_civil(cls, **kwargs): \"\"\" Consider nighttime as time", "0) max_val : float best value cared about (rescales to", "SkyCoord from astropy import table import numpy as np from", "as the `~astropy.coordinates.AltAz` frame tends to mishandle negative Parameters ----------", "time_range = (time_range-12*u.hour, time_range+12*u.hour) is_24hr_table = True applied_constraints = [constraint(observer,", "Sun is below the horizon and the corrections for atmospheric", "\"MoonSeparationConstraint\", \"MoonIlluminationConstraint\", \"LocalTimeConstraint\", \"PrimaryEclipseConstraint\", \"SecondaryEclipseConstraint\", \"Constraint\", \"TimeConstraint\", \"observability_table\", \"months_observable\", \"max_best_rescale\",", "is returned for ``vals`` above ``max_val``. (in some cases anything", "grid spacing ``time_resolution`` for ``observer``. Cache the result on the", "Minimum airmass of the target. `None` indicates no limit. boolean_contstraint", "altitude = altaz.alt # cache the altitude observer._altaz_cache[aakey] = dict(times=times,", "[constraints] applied_constraints = [constraint(observer, targets, times=times, time_range=time_range, time_grid_resolution=time_grid_resolution, grid_times_targets=True) for", "\" \"SunSeparationConstraint.\") return mask class MoonSeparationConstraint(Constraint): \"\"\" Constrain the distance", "Calculate alt/az for ``target`` at times linearly spaced between the", "= max self.ephemeris = ephemeris @classmethod def dark(cls, min=None, max=0.25,", "Constrain observations to times during primary eclipse. \"\"\" def __init__(self,", "for whether or not each target is ever observable in", "time_range=None, time_grid_resolution=0.5*u.hour, grid_times_targets=False): \"\"\" Compute the constraint for this class", "is not None: mask = ((self.min <= separation) & (separation", "illumination) & (illumination <= self.max)) & moon_up_mask else: raise ValueError(\"No", "less_than_min=0) class AtNightConstraint(Constraint): \"\"\" Constrain the Sun to be below", "target (inclusive). `None` indicates no limit. ephemeris : str, optional", "requires the airmass be \"better than 2\", i.e. at a", "zero for solar altitude calculations. 
This avoids errors in the", "try: mask = np.array([(t.time() >= min_time) or (t.time() <= max_time)", "sequence of `~astroplan.Target` The targets on which to apply the", "to tuple for hashing aakey = _make_cache_key(times, 'moon') if aakey", ">>> subaru = Observer.at_site(\"Subaru\") >>> t1 = Time(\"2016-03-28T12:00:00\") >>> t2", "airmasses to between 0 and 1, with the best (1)", "targets): if not hasattr(observer, '_altaz_cache'): observer._altaz_cache = {} aakey =", "The times to compute the constraint observer : `~astroplan.Observer` the", "def __init__(self, min=None, max=None, ephemeris=None): \"\"\" Parameters ---------- min :", "in the ``constraints_list`` for a particular ``observer``. Parameters ---------- constraints", "January maps to 1, February maps to 2, etc. \"\"\"", "import table import numpy as np from numpy.lib.stride_tricks import as_strided", "min_val, max_val, greater_than_max=1): \"\"\" rescales an input array ``vals`` to", "Array of times on which to test the constraint time_range", "`~astropy.units.Quantity` or `None` (optional) Maximum acceptable separation between Sun and", "[constraints] is_24hr_table = False if hasattr(time_range, 'isscalar') and time_range.isscalar: time_range", "and illumination for an array of times for ``observer``. Cache", "``targets`` are always observable throughout ``time_range`` given constraints in the", "\"maximum time.\") if self.min is not None: if not isinstance(self.min,", "(in some cases anything higher than ``max_val`` should also return", "isinstance(self.min, Time): raise TypeError(\"Time limits must be specified as \"", "(optional) Array of mid-event times on which to test the", "of times on which to test the constraint observer :", "shapes together using standard numpy rules. Returns ------- constraint_result :", "\"\"\" Parameters ---------- eclipsing_system : `~astroplan.periodic.EclipsingSystem` System which must be", "and a maximum of 0.65 Parameters ---------- min : float", "1). Default is one. Examples -------- To constrain observations on", "column ``'time observable'`` will also be present if the ``time_range``", "max, **kwargs) @classmethod def grey(cls, min=0.25, max=0.65, **kwargs): \"\"\" initialize", "import numpy as np from numpy.lib.stride_tricks import as_strided # Package", "apply the constraints. times : `~astropy.time.Time` The times to compute", "the observaton location from which to apply the constraints targets", "1, with the best (60) going to 1 and worst", "for ``vals`` below ``min_val``. (in some cases anything less than", "= dict(times=times, altitude=altitude) finally: if self.force_pressure_zero: observer.pressure = observer_old_pressure else:", "to targets that are observable between 23:50 and 04:08 local", "on interval [0, 1). Default is one. Examples -------- To", "alt/az for ``target`` at times linearly spaced between the two", "twilight_nautical(cls, **kwargs): \"\"\" Consider nighttime as time between nautical twilights", "observer, targets): \"\"\" Actually do the real work of computing", "``horizon``. \"\"\" @u.quantity_input(horizon=u.deg) def __init__(self, max_solar_altitude=0*u.deg, force_pressure_zero=True): \"\"\" Parameters ----------", "0.5 hours. 


def is_event_observable(constraints, observer, target, times=None,
                        times_ingress_egress=None):
    """
    Determines if the ``target`` is observable at each time in ``times``,
    given constraints in ``constraints`` for a particular ``observer``.

    Parameters
    ----------
    constraints : list or `~astroplan.constraints.Constraint`
        Observational constraint(s)
    observer : `~astroplan.Observer`
        The observer who has constraints ``constraints``
    target : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`}
        Target
    times : `~astropy.time.Time` (optional)
        Array of mid-event times on which to test the constraints
    times_ingress_egress : `~astropy.time.Time` (optional)
        Array of ingress and egress times for ``N`` events, with shape
        (``N``, 2).

    Returns
    -------
    event_observable : `~numpy.ndarray`
        Array of booleans of same length as ``times`` for whether or not the
        target is ever observable at each time, given the constraints.
    """
    if not hasattr(constraints, '__len__'):
        constraints = [constraints]

    if times is not None:
        applied_constraints = [constraint(observer, target, times=times,
                                          grid_times_targets=True)
                               for constraint in constraints]
        constraint_arr = np.logical_and.reduce(applied_constraints)

    else:
        times_ing = times_ingress_egress[:, 0]
        times_egr = times_ingress_egress[:, 1]
        applied_constraints_ing = [constraint(observer, target,
                                              times=times_ing,
                                              grid_times_targets=True)
                                   for constraint in constraints]
        applied_constraints_egr = [constraint(observer, target,
                                              times=times_egr,
                                              grid_times_targets=True)
                                   for constraint in constraints]

        constraint_arr = np.logical_and(
            np.logical_and.reduce(applied_constraints_ing),
            np.logical_and.reduce(applied_constraints_egr))
    return constraint_arr
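

# Illustrative sketch (not part of the module) tying the pieces together:
# evaluate a constraint list over one night.  "Subaru" and the dates are
# arbitrary example values; ``FixedTarget.from_name`` needs a network lookup.
def _example_is_observable():
    from astropy.time import Time
    from astroplan import Observer, FixedTarget

    subaru = Observer.at_site("Subaru")
    targets = [FixedTarget.from_name("Vega")]
    constraints = [AirmassConstraint(2),
                   AtNightConstraint.twilight_nautical()]
    time_range = Time(["2016-03-28 06:00", "2016-03-28 18:00"])

    return is_observable(constraints, subaru, targets, time_range=time_range)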
\"\"\" __metaclass__ = ABCMeta def __call__(self, observer, targets,", "Time-grid spacing grid_times_targets : bool if True, grids the constraint", "times for the alt/az computations, (2) 'altaz' contains the corresponding", "Constrain the airmass of a target. In the current implementation", "want. moon_separation = moon.separation(targets) if self.min is None and self.max", "eclipsing or non-eclipsing binary, or exoplanet system. min : float", "constraint = PhaseConstraint(binary, min=0.4, max=0.6) The minimum and maximum phase", "None: timezone = self.max.tzinfo if timezone is None: timezone =", "times, observer, targets): cached_altaz = _get_altaz(times, observer, targets) secz =", "= constraints return tab def min_best_rescale(vals, min_val, max_val, less_than_min=1): \"\"\"", "bool Forcefully use 0 pressure. Returns ------- moon_dict : dict", "or (t.time() <= max_time) for t in times.datetime]) except BaseException:", "moon_up_mask elif self.min is not None and self.max is not", "= self._get_solar_altitudes(times, observer, targets) mask = solar_altitude <= self.max_solar_altitude return", "for hashing aakey = _make_cache_key(times, 'moon') if aakey not in", "misbehave if you try to constrain negative altitudes, as the", "be unique and is # quicker than accessing the ra", "moon.separation(targets) if self.min is None and self.max is not None:", "Maximum acceptable fractional illumination (inclusive). `None` indicates no limit. ephemeris", "(targets.frame.data.lon,) return timekey + targkey def _get_altaz(times, observer, targets, force_zero_pressure=False):", "target, times=times_ing, grid_times_targets=True) for constraint in constraints] applied_constraints_egr = [constraint(observer,", "airmass be \"better than 2\", i.e. at a higher altitude", "observer._altaz_cache[aakey]['altitude'] return altitude def compute_constraint(self, times, observer, targets): solar_altitude =", "`None` (optional) Maximum acceptable separation between Sun and target (inclusive).", "@classmethod def grey(cls, min=0.25, max=0.65, **kwargs): \"\"\" initialize a `~astroplan.constraints.MoonIlluminationConstraint`", "self.max is None: raise ValueError(\"Cannot have a float AirmassConstraint if", "max=None, ephemeris=None): \"\"\" Parameters ---------- min : float or `None`", "of booleans of same length as ``targets`` for whether or", "an array of altitudes to be between 0 and 1,", "output_shape) return result @abstractmethod def compute_constraint(self, times, observer, targets): \"\"\"", "constraint in constraints] constraint_arr = np.logical_and.reduce(applied_constraints) return np.all(constraint_arr, axis=1) def", "result @abstractmethod def compute_constraint(self, times, observer, targets): \"\"\" Actually do", "targets is slow due to # complex nature of these", "(2.25). All values outside the range should return 0. >>>", "targets) # make sure the output has the same shape", "---------- max_solar_altitude : `~astropy.units.Quantity` The altitude of the sun below", "if targets.isscalar: # ensure we have a (1, 1) shape", "applied_constraints_egr = [constraint(observer, target, times=times_egr, grid_times_targets=True) for constraint in constraints]", "min specified in \" \"MoonSeparationConstraint.\") return mask class LocalTimeConstraint(Constraint): \"\"\"", "use. 
If not given, use the one set with `~astropy.coordinates.solar_system_ephemeris`", "input array ``vals`` to be a score (between zero and", "must be specified as datetime.time objects.\") if self.max is not", "targets] ever_obs = np.any(constraint_arr, axis=1) always_obs = np.all(constraint_arr, axis=1) frac_obs", "airmasses = np.array([1, 1.5, 2, 3, 0]) >>> min_best_rescale(airmasses, 1,", "[target.name for target in targets] ever_obs = np.any(constraint_arr, axis=1) always_obs", "times on which to test the constraint. targets : {list,", "the one set with `~astropy.coordinates.solar_system_ephemeris` (which is set to 'builtin'", "to be \"night\" (inclusive). force_pressure_zero : bool (optional) Force the", "the interval [0, 1). To constrain observations on orbital phases", "targets, time_range=_current_year_time_range, time_grid_resolution=0.5*u.hour): \"\"\" Determines which month the specified ``targets``", "if not isinstance(self.min, Time): raise TypeError(\"Time limits must be specified", "``targets`` for whether or not each target is observable in", "solar_altitude = self._get_solar_altitudes(times, observer, targets) mask = solar_altitude <= self.max_solar_altitude", "``'ever observable'``, ``'always observable'``, and ``'fraction of time observable'``. The", "returns ICRS coords. sun = get_body('sun', times, location=observer.location) solar_separation =", "zero. Parameters ---------- vals : array-like the values that need", "abstractmethod import datetime import time import warnings # Third-party from", "the altitude of the Sun that can occur when the", "---------- min : `~datetime.time` Earliest local time (inclusive). `None` indicates", "#5897 is released # which should be astropy 1.3.2 moon", "* len(shp2)) output_shape = np.broadcast(a, b).shape if output_shape != np.array(result).shape:", "either a minimum or a maximum time.\") if self.min is", "30, 40, 45, 55, 70]) >>> max_best_rescale(altitudes, 35, 60) #", "= moon.separation(targets) if self.min is None and self.max is not", "observing time to be within certain time limits. An example", "no maximum Parameters ---------- min : float or `None` (optional)", "in ``time_range`` with grid spacing ``time_resolution`` for ``observer``. Cache the", "less than ``min_val`` should also return one, in some cases", "(-12 degrees). \"\"\" return cls(max_solar_altitude=-12*u.deg, **kwargs) @classmethod def twilight_astronomical(cls, **kwargs):", "None: mask = self.max >= solar_separation elif self.max is None", ": {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target or list of targets. observer", "max : `~astropy.time.Time` Latest time (inclusive). `None` indicates no limit.", "\"LocalTimeConstraint\", \"PrimaryEclipseConstraint\", \"SecondaryEclipseConstraint\", \"Constraint\", \"TimeConstraint\", \"observability_table\", \"months_observable\", \"max_best_rescale\", \"min_best_rescale\", \"PhaseConstraint\",", "@classmethod def bright(cls, min=0.65, max=None, **kwargs): \"\"\" initialize a `~astroplan.constraints.MoonIlluminationConstraint`", "To constrain observations on orbital phases between 0.6 and 1.2,", "on an ``observer``` object. 


def observability_table(constraints, observer, targets, times=None,
                        time_range=None, time_grid_resolution=0.5*u.hour):
    """
    Creates a table with information about observability for all the
    ``targets`` over the requested ``time_range``, given the constraints in
    ``constraints`` for ``observer``.

    Parameters
    ----------
    constraints : list or `~astroplan.constraints.Constraint`
        Observational constraint(s)
    observer : `~astroplan.Observer`
        The observer who has constraints ``constraints``
    targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`}
        Target or list of targets
    times : `~astropy.time.Time` (optional)
        Array of times on which to test the constraint
    time_range : `~astropy.time.Time` (optional)
        Lower and upper bounds on time sequence.
        If a single (scalar) time, the table will be for a 24 hour period
        centered on that time.
    time_grid_resolution : `~astropy.units.Quantity` (optional)
        If ``time_range`` is specified, determine whether constraints are met
        between test times in ``time_range`` by checking constraint at
        linearly-spaced times separated by ``time_resolution``. Default is
        0.5 hours.

    Returns
    -------
    observability_table : `~astropy.table.Table`
        A Table containing the observability information for each of the
        ``targets``. The table contains four columns with information about
        the target and its observability: ``'target name'``,
        ``'ever observable'``, ``'always observable'``, and
        ``'fraction of time observable'``.  The column ``'time observable'``
        will also be present if the ``time_range`` is given as a scalar.  It
        also contains metadata entries ``'times'`` (with an array of all the
        times), ``'observer'`` (the `~astroplan.Observer` object), and
        ``'constraints'`` (containing the supplied ``constraints``).
    """
    if not hasattr(constraints, '__len__'):
        constraints = [constraints]

    is_24hr_table = False
    if hasattr(time_range, 'isscalar') and time_range.isscalar:
        time_range = (time_range - 12*u.hour, time_range + 12*u.hour)
        is_24hr_table = True

    applied_constraints = [constraint(observer, targets, times=times,
                                      time_range=time_range,
                                      time_grid_resolution=time_grid_resolution,
                                      grid_times_targets=True)
                           for constraint in constraints]
    constraint_arr = np.logical_and.reduce(applied_constraints)

    colnames = ['target name', 'ever observable', 'always observable',
                'fraction of time observable']

    target_names = [target.name for target in targets]
    ever_obs = np.any(constraint_arr, axis=1)
    always_obs = np.all(constraint_arr, axis=1)
    frac_obs = np.sum(constraint_arr, axis=1) / constraint_arr.shape[1]

    tab = table.Table(names=colnames, data=[target_names, ever_obs,
                                            always_obs, frac_obs])

    if times is None and time_range is not None:
        times = time_grid_from_range(time_range,
                                     time_resolution=time_grid_resolution)

    if is_24hr_table:
        tab['time observable'] = tab['fraction of time observable'] * 24*u.hour

    tab.meta['times'] = times.datetime
    tab.meta['observer'] = observer
    tab.meta['constraints'] = constraints

    return tab
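

# Illustrative sketch (not part of the module): the same inputs as
# ``is_observable`` produce a summary table; passing a scalar time yields the
# extra 'time observable' column for the 24 hour window centred on it.
# "Subaru", "Vega", and the date are arbitrary example values.
def _example_observability_table():
    from astropy.time import Time
    from astroplan import Observer, FixedTarget

    subaru = Observer.at_site("Subaru")
    targets = [FixedTarget.from_name("Vega")]
    constraints = [AirmassConstraint(2),
                   AtNightConstraint.twilight_nautical()]

    return observability_table(constraints, subaru, targets,
                               time_range=Time("2016-03-28 12:00"))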
\"\"\" @u.quantity_input(horizon=u.deg) def __init__(self, max_solar_altitude=0*u.deg, force_pressure_zero=True):", "of ``times`` and ``targets``. Often, we wish to store expensive", "if the end user # disables gridding and re-shapes the", "Constrain observations to times in some range of phases for", "not each target is observable in the time range given", "times is not None: applied_constraints = [constraint(observer, target, times=times, grid_times_targets=True)", "and/or min specified in \" \"AirmassConstraint.\") return mask else: if", "is observable, one set per target. These integers are 1-based", "target, times=times_egr, grid_times_targets=True) for constraint in constraints] constraint_arr = np.logical_and(np.logical_and.reduce(applied_constraints_ing),", "grid_times_targets=True) for constraint in constraints] constraint_arr = np.logical_and.reduce(applied_constraints) return np.all(constraint_arr,", ">>> from astroplan import Observer >>> from astroplan.constraints import LocalTimeConstraint", "altaz = observer.altaz(times, targets, grid_times_targets=False) observer._altaz_cache[aakey] = dict(times=times, altaz=altaz) finally:", "self.max)) & moon_up_mask else: raise ValueError(\"No max and/or min specified", "None: min_time = self.min else: min_time = self.min = datetime.time(0,", "refraction return nonsense values. \"\"\" self.max_solar_altitude = max_solar_altitude self.force_pressure_zero =", "specified ``targets`` are observable for a specific ``observer``, given the", "def compute_constraint(self, times, observer, targets): # use get_body rather than", "max_best_rescale >>> import numpy as np >>> altitudes = np.array([20,", "Forcefully use 0 pressure. Returns ------- moon_dict : dict Dictionary", "those times. \"\"\" if not hasattr(observer, '_moon_cache'): observer._moon_cache = {}", "same shape as would result from # broadcasting times and", "and times along the second. Otherwise, we rely on broadcasting", "= solar_altitude <= self.max_solar_altitude return mask class GalacticLatitudeConstraint(Constraint): \"\"\" Constrain", "None: mask = self.min <= secz elif self.min is not", "observations to targets that are observable between 23:50 and 04:08", "def compute_constraint(self, times, observer, targets): # removed the location argument", "limit. max : `~astropy.time.Time` Latest time (inclusive). `None` indicates no", "wish to store expensive calculations for a combination of ``targets``", "from astroplan.constraints import max_best_rescale >>> import numpy as np >>>", "moon_down_mask elif self.max is None and self.min is not None:", "goes to one, and the ``min_val`` goes to zero. Parameters", "return mask class SunSeparationConstraint(Constraint): \"\"\" Constrain the distance between the", "60. >>> from astroplan.constraints import max_best_rescale >>> import numpy as", "index and times along the second. \"\"\" # Should be", "<= self.max_solar_altitude return mask class GalacticLatitudeConstraint(Constraint): \"\"\" Constrain the distance", "class TimeConstraint(Constraint): \"\"\"Constrain the observing time to be within certain", ") def _make_cache_key(times, targets): \"\"\" Make a unique key to", "self.max is not None: timezone = self.max.tzinfo if timezone is", "<= separation) & (separation <= self.max)) else: raise ValueError(\"No max", "defining observational constraints. \"\"\" __metaclass__ = ABCMeta def __call__(self, observer,", "separation elif self.min is not None and self.max is not", "local time (inclusive). `None` indicates no limit. 
max : `~datetime.time`", "return mask class PrimaryEclipseConstraint(Constraint): \"\"\" Constrain observations to times during", "table with information about observability for all the ``targets`` over", "as np >>> airmasses = np.array([1, 1.5, 2, 3, 0])", "and worst (2.25). All values outside the range should return", "__init__(self, min=None, max=None): \"\"\" Parameters ---------- min : `~astropy.time.Time` Earliest", "the Moon has set. \"\"\" def __init__(self, min=None, max=None, ephemeris=None):", "\"\"\" # make a tuple from times try: timekey =", "or not each target is observable in the time range", "limit. max : `~astropy.units.Quantity` or `None` (optional) Maximum acceptable separation", "times separated by ``time_resolution``. Default is 0.5 hours. Returns -------", "on which to test the constraints times_ingress_egress : `~astropy.time.Time` (optional)", "``time_range`` is specified, determine whether constraints are met between test", "max_solar_altitude=0*u.deg, force_pressure_zero=True): \"\"\" Parameters ---------- max_solar_altitude : `~astropy.units.Quantity` The altitude", "phases for a periodic event (e.g.~transiting exoplanets, eclipsing binaries). \"\"\"", "A Table containing the observability information for each of the", "``time_resolution``. Default is 0.5 hours. Returns ------- observable_months : list", "class for objects defining observational constraints. \"\"\" __metaclass__ = ABCMeta", "import time import warnings # Third-party from astropy.time import Time", "on [0, 1], where 0 is the min altitude and", "separation is as-seen # by the observer. # 'get_sun' returns", "is not None: mask = self.min <= solar_separation elif self.min", "\"SunSeparationConstraint.\") return mask class MoonSeparationConstraint(Constraint): \"\"\" Constrain the distance between", "moon coord # which is GCRS, and that is what", "but there is potential for huge speedup if the end", "cached_altaz = _get_altaz(times, observer, targets) secz = cached_altaz['altaz'].secz.value if self.boolean_constraint:", "`~astroplan.FixedTarget`} Target or list of targets. observer : `~astroplan.Observer` The", "1.3.2 moon = get_moon(times, ephemeris=self.ephemeris) # note to future editors", "the constraint. Subclasses override this. Parameters ---------- times : `~astropy.time.Time`", "for an array of times for ``targets`` and ``observer``. 
Cache", ": `~astropy.units.Quantity` or `None` (optional) Maximum acceptable separation between Sun", "\"\"\" self.min = min self.max = max self.ephemeris = ephemeris", "and/or min specified in \" \"MoonSeparationConstraint.\") return mask class MoonIlluminationConstraint(Constraint):", "Galactic, SkyCoord from astropy import table import numpy as np", "(illumination <= self.max)) & moon_up_mask else: raise ValueError(\"No max and/or", "is None: raise ValueError(\"You must at least supply either a", "aakey not in observer._meridian_transit_cache: meridian_transit_times = observer.target_meridian_transit_time(times, targets) observer._meridian_transit_cache[aakey] =", "mishandle negative Parameters ---------- min : `~astropy.units.Quantity` or `None` Minimum", "to apply the constraints targets : sequence of `~astroplan.Target` The", "make hashable thing from targets coords try: if hasattr(targets, 'frame'):", "\"SunSeparationConstraint\", \"MoonSeparationConstraint\", \"MoonIlluminationConstraint\", \"LocalTimeConstraint\", \"PrimaryEclipseConstraint\", \"SecondaryEclipseConstraint\", \"Constraint\", \"TimeConstraint\", \"observability_table\", \"months_observable\",", "degrees). \"\"\" return cls(max_solar_altitude=-18*u.deg, **kwargs) def _get_solar_altitudes(self, times, observer, targets):", "= ABCMeta def __call__(self, observer, targets, times=None, time_range=None, time_grid_resolution=0.5*u.hour, grid_times_targets=False):", "observable'`` will also be present if the ``time_range`` is given", "times, observer, targets): timezone = None # get timezone from", "a lot by dropping to the trigonometric # altitude calculations.", "1 what is returned for ``vals`` above ``max_val``. (in some", "(2) 'altaz' contains the corresponding alt/az coordinates at those times.", "for a periodic event (e.g.~transiting exoplanets, eclipsing binaries). \"\"\" def", "indicates no limit. max : `~astropy.units.Quantity` or `None` (optional) Maximum", "avoids errors in the altitude of the Sun that can", "for this class would be to associate an acceptable time", "np.any(constraint_arr, axis=1) always_obs = np.all(constraint_arr, axis=1) frac_obs = np.sum(constraint_arr, axis=1)", "broadcasting times and targets against each other if targets is", "and/or min specified in \" \"GalacticLatitudeConstraint.\") return mask class SunSeparationConstraint(Constraint):", "constraints targets : sequence of `~astroplan.Target` The targets on which", ">= solar_separation elif self.max is None and self.min is not", "some targets. \"\"\" def __init__(self, min=None, max=None, ephemeris=None): \"\"\" Parameters", "@classmethod def twilight_nautical(cls, **kwargs): \"\"\" Consider nighttime as time between", "If ``time_range`` is not specified, defaults to current year (localtime)", "in some range of phases for a periodic event (e.g.~transiting", "mask class GalacticLatitudeConstraint(Constraint): \"\"\" Constrain the distance between the Galactic", "on same day: if min_time < max_time: try: mask =", "we have a (1, 1) shape coord targets = SkyCoord(np.tile(targets,", "warnings # Third-party from astropy.time import Time import astropy.units as", "the second. 
\"\"\" if times is None and time_range is", "class MoonIlluminationConstraint(Constraint): \"\"\" Constrain the fractional illumination of the Earth's", "local time: >>> from astroplan import Observer >>> from astroplan.constraints", "initialize a `~astroplan.constraints.MoonIlluminationConstraint` with defaults of a minimum of 0.65", "if not all observing blocks are valid over the time", "whether ``targets`` are always observable throughout ``time_range`` given constraints in", "upper bounds on time sequence, with spacing ``time_resolution``. This will", "computing the constraint. Subclasses override this. Parameters ---------- times :", "is observable in the time range given the constraints. \"\"\"", "times on which to test the constraints times_ingress_egress : `~astropy.time.Time`", "event_observable : `~numpy.ndarray` Array of booleans of same length as", "ValueError(\"Cannot have a float AirmassConstraint if max is None.\") else:", "altitude az and illumination for an array of times for", "an input array ``vals`` to be a score (between zero", "is None else self.min max_time = Time(\"2120-01-01T00:00:00\") if self.max is", "with spacing ``time_resolution``. This will be passed as the first", "constraint. observer : `~astroplan.Observer` The observer who has constraints ``constraints``.", "argument into `~astroplan.time_grid_from_range`. If a single (scalar) time, the table", "no limit. max : float or `None` (optional) Maximum acceptable", "into `~astroplan.time_grid_from_range`. If a single (scalar) time, the table will", "def _get_solar_altitudes(self, times, observer, targets): if not hasattr(observer, '_altaz_cache'): observer._altaz_cache", "result with targets along the first index and times along", "ephemeris def compute_constraint(self, times, observer, targets): # removed the location", "and the corrections for atmospheric refraction return nonsense values. \"\"\"", "target. These integers are 1-based so that January maps to", "else: try: mask = np.array([(t.time() >= min_time) or (t.time() <=", "be to associate an acceptable time range with a specific", "and egress times for ``N`` events, with shape (``N``, 2).", "altitude calculations. if not hasattr(constraints, '__len__'): constraints = [constraints] times", "the observation location from which to apply the constraints targets", "Package from .moon import moon_illumination from .utils import time_grid_from_range from", "Sun and target (inclusive). `None` indicates no limit. \"\"\" self.min", "with defaults of no minimum and a maximum of 0.25", "An example use case for this class would be to", "time between nautical twilights (-12 degrees). \"\"\" return cls(max_solar_altitude=-12*u.deg, **kwargs)", "__call__(self, observer, targets, times=None, time_range=None, time_grid_resolution=0.5*u.hour, grid_times_targets=False): \"\"\" Compute the", "(inclusive). `None` indicates no limit. Examples -------- Constrain the observations", "`~astropy.time.Time` (optional) Array of times on which to test the", "and times along the second. \"\"\" # Should be implemented", "constraint that requires the airmass be \"better than 2\", i.e.", "the sun below which it is considered to be \"night\"", "None and self.max is None: raise ValueError(\"You must at least", "a score (between zero and one), where the ``min_val`` goes", "which targets are observable for an observer. 
\"\"\" from __future__", "# make hashable thing from targets coords try: if hasattr(targets,", "= observer_old_pressure return observer._moon_cache[aakey] def _get_meridian_transit_times(times, observer, targets): \"\"\" Calculate", "complex nature of these objects. We make # to simple", "- min_val) below = vals < min_val above = vals", "``vals`` equal to ``min_val`` equal 0 and those equal to", "constraints. \"\"\" __metaclass__ = ABCMeta def __call__(self, observer, targets, times=None,", "max_time: try: mask = np.array([min_time <= t.time() <= max_time for", "observable'``. The column ``'time observable'`` will also be present if", "first index and times along the second. Otherwise, we rely", "equal 1 Examples -------- rescale airmasses to between 0 and", "what is returned for ``vals`` below ``min_val``. (in some cases", "AND TIME_RANGE ARE SET? time_range : `~astropy.time.Time` (length = 2)", "try: if self.force_pressure_zero: observer_old_pressure = observer.pressure observer.pressure = 0 #", ": bool Forcefully use 0 pressure. Returns ------- moon_dict :", "is also satisfied if the Moon has set. \"\"\" def", "= min self.max = max self.ephemeris = ephemeris @classmethod def", "Examples -------- Constrain the observations to targets that are observable", "not hasattr(observer, '_altaz_cache'): observer._altaz_cache = {} # convert times, targets", "`None` Minimum airmass of the target. `None` indicates no limit.", "min : float or `None` (optional) Minimum acceptable fractional illumination", "not None: max_time = self.max else: max_time = datetime.time(23, 59,", "times, observer=None, targets=None): mask = self.eclipsing_system.in_secondary_eclipse(times) return mask class PhaseConstraint(Constraint):", "return cls(min, max, **kwargs) @classmethod def bright(cls, min=0.65, max=None, **kwargs):", "from .target import get_skycoord __all__ = [\"AltitudeConstraint\", \"AirmassConstraint\", \"AtNightConstraint\", \"is_observable\",", "None: mask = self.max >= separation elif self.max is None", "targets): timezone = None # get timezone from time objects,", "if self.min is None else self.min max_time = Time(\"2120-01-01T00:00:00\") if", "hashing aakey = _make_cache_key(times, 'moon') if aakey not in observer._moon_cache:", "get_body rather than get sun here, since # it returns", "``targets``. The table contains four columns with information about the", "# disables gridding and re-shapes the coords themselves # prior", "cases anything higher than ``max_val`` should also return one, in", "Latest time (inclusive). `None` indicates no limit. Examples -------- Constrain", "boolean_constraint def compute_constraint(self, times, observer, targets): cached_altaz = _get_altaz(times, observer,", "dropping to the trigonometric # altitude calculations. applied_constraints = [constraint(observer,", "you try to constrain negative altitudes, as the `~astropy.coordinates.AltAz` frame", "and those equal to ``min_val`` equal 1 Examples -------- rescale", "first argument into `~astroplan.time_grid_from_range`. time_grid_resolution : `~astropy.units.Quantity` (optional) If ``time_range``", "the Sun and some targets. \"\"\" def __init__(self, min=None, max=None):", "of the target. .. 
note:: This can misbehave if you", "observer, targets): \"\"\" Calculate next meridian transit for an array", "None: mask = (self.min <= secz) & (secz <= self.max)", "is not None else 0.0 self.max = max if max", "self.boolean_constraint: lowermask = self.min <= alt uppermask = alt <=", "times=times, grid_times_targets=True) for constraint in constraints] constraint_arr = np.logical_and.reduce(applied_constraints) months_observable", "moon_down_mask = moon_alt < 0 moon_up_mask = moon_alt >= 0", "max_val rescaled[below] = less_than_min rescaled[above] = 0 return rescaled def", "Array of ingress and egress times for ``N`` events, with", "a maximum time.\") if self.min is not None: if not", "moon altitude az and illumination for an array of times", "shape coord targets = SkyCoord(np.tile(targets, 1))[:, np.newaxis] else: targets =", "`~astropy.units.Quantity` The altitude of the sun below which it is", "None: times = time_grid_from_range(time_range, time_resolution=time_grid_resolution) if is_24hr_table: tab['time observable'] =", "corresponding alt/az coordinates at those times. \"\"\" if not hasattr(observer,", "this class Parameters ---------- observer : `~astroplan.Observer` the observation location", "`~astroplan.Observer` The observer who has constraints ``constraints`` targets : {list,", "of ``targets`` and ``times`` in a cache on an ``observer```", "must be in secondary eclipse. \"\"\" self.eclipsing_system = eclipsing_system def", "= Time(\"2120-01-01T00:00:00\") if self.max is None else self.max mask =", "longitude # attribute of the frame data should be unique", "has the same shape as would result from # broadcasting", "constraint. Subclasses override this. Parameters ---------- times : `~astropy.time.Time` The", "the order (max, min) in this initializer to support the", "find solar altitude at these times altaz = observer.altaz(times, get_sun(times))", "\" \"astropy.time.Time objects.\") if self.max is not None: if not", "where the ``min_val`` goes to one, and the ``max_val`` goes", "between 0 and 1, with the best (1) and worst", "`is_always_observable`. \"\"\" def __init__(self, min=None, max=None): \"\"\" Parameters ---------- min", "times to compute the constraint observer : `~astroplan.Observer` the observaton", "targets = get_skycoord(targets) # TODO: these broadcasting operations are relatively", "location argument here, which causes small <1 deg # innacuracies,", "the corresponding alt/az coordinates at those times and (3) contains", "`None` (optional) Minimum acceptable fractional illumination (inclusive). `None` indicates no", "not None else 1.0 def compute_constraint(self, times, observer=None, targets=None): phase", "degrees). \"\"\" return cls(max_solar_altitude=-12*u.deg, **kwargs) @classmethod def twilight_astronomical(cls, **kwargs): \"\"\"", "indicates no limit. ephemeris : str, optional Ephemeris to use.", "max. \"\"\" def __init__(self, min=None, max=None, boolean_constraint=True): if min is", "in ``constraints_list`` for ``observer``. Parameters ---------- constraints : list or", "= (time_range-12*u.hour, time_range+12*u.hour) is_24hr_table = True applied_constraints = [constraint(observer, targets,", "04:08 local Hawaiian time >>> constraint = LocalTimeConstraint(min=dt.time(23,50), max=dt.time(4,8)) \"\"\"", "also return one, in some cases it should return zero)", "altitude def compute_constraint(self, times, observer, targets): solar_altitude = self._get_solar_altitudes(times, observer,", "HAPPENS WHEN BOTH TIMES AND TIME_RANGE ARE SET? 
time_range :", "<= moon_separation elif self.min is not None and self.max is", "self.boolean_constraint: if self.min is None and self.max is not None:", ">>> import datetime as dt >>> subaru = Observer.at_site(\"Subaru\", timezone=\"US/Hawaii\")", "TypeError(\"Time limits must be specified as datetime.time objects.\") def compute_constraint(self,", "to apply the constraints. Returns ------- constraint_result : 2D array", "max_time) return mask class PrimaryEclipseConstraint(Constraint): \"\"\" Constrain observations to times", "(in some cases anything less than ``min_val`` should also return", "tuple from times try: timekey = tuple(times.jd) + times.shape except", "0 and 1, with the best (1) and worst (2.25).", ".. note:: This can misbehave if you try to constrain", "the current implementation the airmass is approximated by the secant", "---------- min : `~astropy.units.Quantity` or `None` Minimum altitude of the", "raise ValueError(\"No max and/or min specified in \" \"AirmassConstraint.\") return", "eclipsing_system : `~astroplan.periodic.EclipsingSystem` System which must be in secondary eclipse.", "a string. targkey = (targets,) except BaseException: targkey = (targets.frame.data.lon,)", "no limit. boolean_contstraint : bool Examples -------- To create a", "constraint observer : `~astroplan.Observer` the observaton location from which to", "np.array([(t.time() >= min_time) or (t.time() <= max_time) for t in", "PhaseConstraint(binary, min=0.4, max=0.6) The minimum and maximum phase must be", "self.max is not None: mask = self.max >= moon_separation elif", "in times.datetime]) except BaseException: # use np.bool so shape queries", "than airmass=2:: AirmassConstraint(2) \"\"\" def __init__(self, max=None, min=1, boolean_constraint=True): self.min", "tab.meta['times'] = times.datetime tab.meta['observer'] = observer tab.meta['constraints'] = constraints return", "user # disables gridding and re-shapes the coords themselves #", "going to 1 and worst (35) going to 0. For", "the moon up? cached_moon = _get_moon_data(times, observer) moon_alt = cached_moon['altaz'].alt", "`~astroplan.constraints.Constraint` Observational constraint(s) observer : `~astroplan.Observer` The observer who has", "in the order (max, min) in this initializer to support", "nighttime as time between astronomical twilights (-18 degrees). \"\"\" return", "integers representing each month that a target is observable, one", "hours. Returns ------- ever_observable : list List of booleans of", "is not None: applied_constraints = [constraint(observer, target, times=times, grid_times_targets=True) for", "is a string. targkey = (targets,) except BaseException: targkey =", "Examples -------- To create a constraint that requires the airmass", "order (max, min) in this initializer to support the common", "self.ephemeris = ephemeris def compute_constraint(self, times, observer, targets): # removed", "``targets`` are observable for a specific ``observer``, given the supplied", "centred frame, so the separation is as-seen # by the", "(t.time() <= max_time) for t in times.datetime]) except BaseException: mask", "not isinstance(self.min, Time): raise TypeError(\"Time limits must be specified as", "hashing aakey = _make_cache_key(times, targets) if aakey not in observer._altaz_cache:", "# complex nature of these objects. 
We make # to", "observer.pressure = observer_old_pressure else: altitude = observer._altaz_cache[aakey]['altitude'] return altitude def", "= [constraints] if times is not None: applied_constraints = [constraint(observer,", "and self.max is not None: mask = secz <= self.max", "with defaults of a minimum of 0.65 and no maximum", "local time (inclusive). `None` indicates no limit. Examples -------- Constrain", "defaults to current year (localtime) time_grid_resolution : `~astropy.units.Quantity` (optional) If", "= _get_moon_data(times, observer) moon_alt = cached_moon['altaz'].alt moon_down_mask = moon_alt <", "1).') self.min = min if min is not None else", "min=1, boolean_constraint=True): self.min = min self.max = max self.boolean_constraint =", "objects. We make # to simple numpy arrays of the", "max : `~astropy.units.Quantity` or `None` (optional) Maximum acceptable separation between", "objects.\") if self.max is not None: if not isinstance(self.max, Time):", "if aakey not in observer._moon_cache: try: if force_zero_pressure: observer_old_pressure =", "altitude at these times altaz = observer.altaz(times, get_sun(times)) altitude =", "\"astropy.time.Time objects.\") def compute_constraint(self, times, observer, targets): with warnings.catch_warnings(): warnings.simplefilter('ignore')", "mask = np.bool_(min_time <= times.datetime.time() <= max_time) # If time", "each target is observable in the time range given the", "dict(times=times, altaz=altaz) finally: if force_zero_pressure: observer.pressure = observer_old_pressure return observer._altaz_cache[aakey]", "illumination (inclusive). `None` indicates no limit. ephemeris : str, optional", "airmass of the target. `None` indicates no limit. min :", "of unique integers representing each month that a target is", "def __init__(self, min=None, max=None): \"\"\" Parameters ---------- min : `~astropy.units.Quantity`", "described on the interval [0, 1). To constrain observations on", "Returns ------- moon_dict : dict Dictionary containing three key-value pairs.", "Constraint raise NotImplementedError class AltitudeConstraint(Constraint): \"\"\" Constrain the altitude of", "Time(\"2016-03-30T12:00:00\") >>> constraint = TimeConstraint(t1,t2) \"\"\" self.min = min self.max", "at least supply either a minimum or a \" \"maximum", "['target name', 'ever observable', 'always observable', 'fraction of time observable']", "errors in the altitude of the Sun that can occur", "= self.min <= moon_separation elif self.min is not None and", "or `~astroplan.constraints.Constraint` Observational constraint(s) observer : `~astroplan.Observer` The observer who", "(targets,) except BaseException: targkey = (targets.frame.data.lon,) return timekey + targkey", "not None and self.max is not None: mask = ((self.min", "key-value pair. 'times' contains the meridian_transit times. \"\"\" if not", ": `~numpy.ndarray` Array of booleans of same length as ``times``", "time_range+12*u.hour) is_24hr_table = True applied_constraints = [constraint(observer, targets, times=times, time_range=time_range,", "equal to ``min_val`` equal 1 Examples -------- rescale airmasses to", "Constrain the observable hours. \"\"\" def __init__(self, min=None, max=None): \"\"\"", "If a single (scalar) time, the table will be for", "unique and is # quicker than accessing the ra attribute.", "'times' contains the meridian_transit times. \"\"\" if not hasattr(observer, '_meridian_transit_cache'):", "list of targets. 
Returns ------- cache_key : tuple A hashable", "for atmospheric refraction return nonsense values. \"\"\" self.max_solar_altitude = max_solar_altitude", "try: mask = np.array([min_time <= t.time() <= max_time for t", "values outside the range, the rescale should return 0 below", "targets against each other if targets is not None: #", "given constraints in the ``constraints_list`` for a particular ``observer``. Parameters", "acceptable fractional illumination (inclusive). `None` indicates no limit. max :", "------- ever_observable : list List of booleans of same length", "class Constraint(object): \"\"\" Abstract class for objects defining observational constraints.", "for outside). If False, the constraint returns a float on", "to be between 0 and 1 min_val : float worst", "or `None` Maximum altitude of the target (inclusive). `None` indicates", "# If time limits occur on same day: if min_time", "= as_strided(x, shape=shp1, strides=[0] * len(shp1)) b = as_strided(x, shape=shp2,", "------- constraint_result : 2D array of float or bool The", "work of computing the constraint. Subclasses override this. Parameters ----------", "months_observable(constraints, observer, targets, time_range=_current_year_time_range, time_grid_resolution=0.5*u.hour): \"\"\" Determines which month the", "these objects. We make # to simple numpy arrays of", "---------- eclipsing_system : `~astroplan.periodic.EclipsingSystem` System which must be in secondary", "potential for huge speedup if the end user # disables", "limit. max : `~astropy.units.Quantity` or `None` (optional) Minimum acceptable Galactic", "self.min <= secz elif self.min is not None and self.max", "float AirmassConstraint if max is None.\") else: mx = self.max", "and self.max is not None: mask = (self.max >= illumination)", "observer.pressure = 0 # find solar altitude at these times", "all the times), ``'observer'`` (the `~astroplan.Observer` object), and ``'constraints'`` (containing", "the ``max_val`` goes to zero. Parameters ---------- vals : array-like", "for users who care about the upper limit on the", "observer._altaz_cache = {} # convert times, targets to tuple for", "the Galactic plane and some targets. \"\"\" def __init__(self, min=None,", "np.logical_and.reduce(applied_constraints) colnames = ['target name', 'ever observable', 'always observable', 'fraction", "Creates a table with information about observability for all the", "None and self.max is not None: mask = secz <=", "it's observability: ``'target name'``, ``'ever observable'``, ``'always observable'``, and ``'fraction", "and self.min is not None: mask = self.min <= moon_separation", "to times during secondary eclipse. \"\"\" def __init__(self, eclipsing_system): \"\"\"", "with a specific observing block. This can be useful if", "is not None: mask = (self.min <= secz) & (secz", "to compute the phase. For example, the system could be", "= max def compute_constraint(self, times, observer, targets): separation = abs(targets.transform_to(Galactic).b)", "= periodic_event if (min < 0) or (min > 1)", "in targets] ever_obs = np.any(constraint_arr, axis=1) always_obs = np.all(constraint_arr, axis=1)", "min specified in \" \"AirmassConstraint.\") return mask else: if self.max", "constraints. times : `~astropy.time.Time` The times to compute the constraint.", "the one set with ``astropy.coordinates.solar_system_ephemeris.set`` (which is set to 'builtin'", ", 0. ]) \"\"\" rescaled = (vals - max_val) /", "information for each of the ``targets``. 
The table contains four", "be \"better than 2\", i.e. at a higher altitude than", "0 illumination = cached_moon['illum'] if self.min is None and self.max", "!= np.array(result).shape: result = np.broadcast_to(result, output_shape) return result @abstractmethod def", "times.datetime]) except BaseException: # use np.bool so shape queries don't", "periodic_event if (min < 0) or (min > 1) or", "\"AtNightConstraint\", \"is_observable\", \"is_always_observable\", \"time_grid_from_range\", \"GalacticLatitudeConstraint\", \"SunSeparationConstraint\", \"MoonSeparationConstraint\", \"MoonIlluminationConstraint\", \"LocalTimeConstraint\", \"PrimaryEclipseConstraint\",", "separation in the frame of the moon coord # which", "None: self.min = -90*u.deg else: self.min = min if max", "Minimum acceptable fractional illumination (inclusive). `None` indicates no limit. max", "of these objects. We make # to simple numpy arrays", "90*u.deg else: self.max = max self.boolean_constraint = boolean_constraint def compute_constraint(self,", "return mask class GalacticLatitudeConstraint(Constraint): \"\"\" Constrain the distance between the" ]
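A minimal sketch of how the constraint classes and observability helpers named in these fragments fit together; the observing site, target coordinates, and date range below are illustrative assumptions rather than values taken from the source:

    import astropy.units as u
    from astropy.coordinates import SkyCoord
    from astropy.time import Time
    from astroplan import (Observer, FixedTarget, AltitudeConstraint,
                           AirmassConstraint, AtNightConstraint,
                           is_observable, is_always_observable, observability_table)

    # Observer and targets -- illustrative values only
    observer = Observer.at_site("Subaru", timezone="US/Hawaii")
    targets = [FixedTarget(coord=SkyCoord(ra=279.23*u.deg, dec=38.78*u.deg), name="Vega"),
               FixedTarget(coord=SkyCoord(ra=10.68*u.deg, dec=41.27*u.deg), name="M31")]

    # Constraints are combined by passing a list; each one is evaluated on a
    # grid of times spaced by time_grid_resolution within time_range.
    constraints = [AltitudeConstraint(min=20*u.deg),
                   AirmassConstraint(max=2),            # "better than airmass 2"
                   AtNightConstraint.twilight_civil()]
    time_range = Time(["2017-08-01 06:00", "2017-08-01 12:00"])

    # Boolean per target: observable at any / at all grid times in the range
    ever = is_observable(constraints, observer, targets, time_range=time_range)
    always = is_always_observable(constraints, observer, targets, time_range=time_range)

    # Summary table with 'ever observable', 'always observable' and
    # 'fraction of time observable' columns, one row per target.
    tab = observability_table(constraints, observer, targets, time_range=time_range)
    print(tab)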
[ "= 'index' return render(request, 'backend/index.html', context) @login_required() def posts(request): context", "tags = paginator.page(paginator.num_pages) context['tags'] = tags return render(request, 'backend/tags.html', context)", "created.') return HttpResponseRedirect(reverse('user_panel_tags')) context['form'] = form return render(request, 'backend/edit_tag.html', context)", "= {} context['nav_active'] = 'categories' form = CategoryForm() if request.method", "context['tags'] = tags return render(request, 'backend/tags.html', context) @login_required() def add_tag(request):", "request.GET.get('page') try: posts = paginator.page(page) except PageNotAnInteger: posts = paginator.page(1)", "request.FILES, instance=category) if form.is_valid(): form.save() messages.success(request, 'Category updated.') return HttpResponseRedirect(reverse('user_panel_categories'))", "'posts' post_list = Post.objects.all() paginator = Paginator(list(reversed(post_list)), 10) page =", "form = TagForm() if request.method == 'POST': form = TagForm(request.POST,", "post = Post.objects.get(pk=post_id) context['post'] = post form = PostForm(instance=post) if", "from django.contrib import messages from django.contrib.auth.decorators import login_required from django.core.paginator", "form return render(request, 'backend/edit_tag.html', context) @login_required() def edit_tag(request, tag_id): context", "= CategoryForm(request.POST, request.FILES) if form.is_valid(): form.save() messages.success(request, 'Category created.') return", "'POST': form = TagForm(request.POST, request.FILES, instance=tag) if form.is_valid(): form.save() messages.success(request,", "form.save() messages.success(request, 'Tag updated.') return HttpResponseRedirect(reverse('user_panel_tags')) context['form'] = form return", "TagForm(request.POST, request.FILES) if form.is_valid(): form.save() messages.success(request, 'Tag created.') return HttpResponseRedirect(reverse('user_panel_tags'))", "= 'tags' form = TagForm() if request.method == 'POST': form", "import render from django.http import HttpResponseRedirect from core.models import Post,", "= 'tags' tag = Tag.objects.get(pk=tag_id) context['tag'] = tag form =", "request.method == 'POST': form = CategoryForm(request.POST, request.FILES, instance=category) if form.is_valid():", "paginator.page(page) except PageNotAnInteger: tags = paginator.page(1) except EmptyPage: tags =", "return HttpResponseRedirect(reverse('user_panel_posts')) @login_required() def categories(request): context = {} context['nav_active'] =", "@login_required() def edit_tag(request, tag_id): context = {} context['nav_active'] = 'tags'", "paginator = Paginator(list(reversed(categories_list)), 10) page = request.GET.get('page') try: categories =", "CategoryForm(request.POST, request.FILES) if form.is_valid(): form.save() messages.success(request, 'Category created.') return HttpResponseRedirect(reverse('user_panel_categories'))", "def delete_tag(request, tag_id): context = {} context['nav_active'] = 'tags' tag", "context['categories'] = categories return render(request, 'backend/categories.html', context) @login_required() def add_category(request):", "PageNotAnInteger, EmptyPage from django.core.urlresolvers import reverse from django.shortcuts import render", "context = {} context['nav_active'] = 'tags' tag = Tag.objects.get(pk=tag_id) tag.delete()", "here. 
@login_required() def index(request): context = {} context['nav_active'] = 'index'", "tag_id): context = {} context['nav_active'] = 'tags' tag = Tag.objects.get(pk=tag_id)", "request.GET.get('page') try: tags = paginator.page(page) except PageNotAnInteger: tags = paginator.page(1)", "context) @login_required() def edit_category(request, category_id): context = {} context['nav_active'] =", "10) page = request.GET.get('page') try: posts = paginator.page(page) except PageNotAnInteger:", "form = PostForm(instance=post) if request.method == 'POST': form = PostForm(request.POST,", "form.save() messages.success(request, 'Tag created.') return HttpResponseRedirect(reverse('user_panel_tags')) context['form'] = form return", "'backend/edit_category.html', context) @login_required() def delete_category(request, category_id): context = {} context['nav_active']", "form = PostForm(request.POST, request.FILES) if form.is_valid(): form.save() messages.success(request, 'Post created.')", "{} context['nav_active'] = 'posts' post = Post.objects.get(pk=post_id) context['post'] = post", "= 'categories' categories_list = Category.objects.all() paginator = Paginator(list(reversed(categories_list)), 10) page", "HttpResponseRedirect(reverse('user_panel_categories')) context['form'] = form return render(request, 'backend/edit_category.html', context) @login_required() def", "return render(request, 'backend/edit_tag.html', context) @login_required() def delete_tag(request, tag_id): context =", "categories = paginator.page(page) except PageNotAnInteger: categories = paginator.page(1) except EmptyPage:", "= posts return render(request, 'backend/posts.html', context) @login_required() def add_post(request): context", "= PostForm(request.POST, request.FILES) if form.is_valid(): form.save() messages.success(request, 'Post created.') return", "form.is_valid(): form.save() messages.success(request, 'Category updated.') return HttpResponseRedirect(reverse('user_panel_categories')) context['form'] = form", "import reverse from django.shortcuts import render from django.http import HttpResponseRedirect", "= CategoryForm(request.POST, request.FILES, instance=category) if form.is_valid(): form.save() messages.success(request, 'Category updated.')", "= Tag.objects.get(pk=tag_id) context['tag'] = tag form = TagForm(instance=tag) if request.method", "form return render(request, 'backend/edit_category.html', context) @login_required() def edit_category(request, category_id): context", "created.') return HttpResponseRedirect(reverse('user_panel_posts')) context['form'] = form return render(request, 'backend/edit_post.html', context)", "= CategoryForm(instance=category) if request.method == 'POST': form = CategoryForm(request.POST, request.FILES,", "posts = paginator.page(1) except EmptyPage: posts = paginator.page(paginator.num_pages) context['posts'] =", "'tags' tag = Tag.objects.get(pk=tag_id) context['tag'] = tag form = TagForm(instance=tag)", "categories(request): context = {} context['nav_active'] = 'categories' categories_list = Category.objects.all()", "= {} context['nav_active'] = 'index' return render(request, 'backend/index.html', context) @login_required()", "= TagForm() if request.method == 'POST': form = TagForm(request.POST, request.FILES)", "PostForm, CategoryForm, TagForm # Create your views here. 
@login_required() def", "PageNotAnInteger: tags = paginator.page(1) except EmptyPage: tags = paginator.page(paginator.num_pages) context['tags']", "context['nav_active'] = 'categories' category = Category.objects.get(pk=category_id) context['category'] = category form", "'backend/index.html', context) @login_required() def posts(request): context = {} context['nav_active'] =", "Category.objects.get(pk=category_id) category.delete() messages.success(request, 'Category deleted.') return HttpResponseRedirect(reverse('user_panel_categories')) @login_required() def tags(request):", "posts return render(request, 'backend/posts.html', context) @login_required() def add_post(request): context =", "render(request, 'backend/categories.html', context) @login_required() def add_category(request): context = {} context['nav_active']", "'Post deleted.') return HttpResponseRedirect(reverse('user_panel_posts')) @login_required() def categories(request): context = {}", "= CategoryForm() if request.method == 'POST': form = CategoryForm(request.POST, request.FILES)", "= 'tags' tags_list = Tag.objects.all() paginator = Paginator(list(reversed(tags_list)), 10) page", "tags return render(request, 'backend/tags.html', context) @login_required() def add_tag(request): context =", "form = TagForm(request.POST, request.FILES) if form.is_valid(): form.save() messages.success(request, 'Tag created.')", "= {} context['nav_active'] = 'tags' tag = Tag.objects.get(pk=tag_id) context['tag'] =", "import login_required from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage from django.core.urlresolvers", "index(request): context = {} context['nav_active'] = 'index' return render(request, 'backend/index.html',", "context['nav_active'] = 'posts' post_list = Post.objects.all() paginator = Paginator(list(reversed(post_list)), 10)", "Category.objects.get(pk=category_id) context['category'] = category form = CategoryForm(instance=category) if request.method ==", "context = {} context['nav_active'] = 'tags' form = TagForm() if", "= TagForm(request.POST, request.FILES) if form.is_valid(): form.save() messages.success(request, 'Tag created.') return", "form.save() messages.success(request, 'Category updated.') return HttpResponseRedirect(reverse('user_panel_categories')) context['form'] = form return", "HttpResponseRedirect(reverse('user_panel_posts')) @login_required() def categories(request): context = {} context['nav_active'] = 'categories'", "django.contrib import messages from django.contrib.auth.decorators import login_required from django.core.paginator import", "{} context['nav_active'] = 'categories' category = Category.objects.get(pk=category_id) category.delete() messages.success(request, 'Category", "paginator.page(1) except EmptyPage: categories = paginator.page(paginator.num_pages) context['categories'] = categories return", "context['posts'] = posts return render(request, 'backend/posts.html', context) @login_required() def add_post(request):", "PostForm(request.POST, request.FILES) if form.is_valid(): form.save() messages.success(request, 'Post created.') return HttpResponseRedirect(reverse('user_panel_posts'))", "if form.is_valid(): form.save() messages.success(request, 'Category updated.') return HttpResponseRedirect(reverse('user_panel_categories')) context['form'] =", "= PostForm(request.POST, request.FILES, instance=post) if form.is_valid(): form.save() messages.success(request, 'Post updated.')", "context) @login_required() def add_category(request): context = {} context['nav_active'] = 
'categories'", "add_category(request): context = {} context['nav_active'] = 'categories' form = CategoryForm()", "messages.success(request, 'Post updated.') return HttpResponseRedirect(reverse('user_panel_posts')) context['form'] = form return render(request,", "return HttpResponseRedirect(reverse('user_panel_tags')) context['form'] = form return render(request, 'backend/edit_tag.html', context) @login_required()", "context = {} context['nav_active'] = 'categories' category = Category.objects.get(pk=category_id) category.delete()", "10) page = request.GET.get('page') try: tags = paginator.page(page) except PageNotAnInteger:", "try: posts = paginator.page(page) except PageNotAnInteger: posts = paginator.page(1) except", "edit_post(request, post_id): context = {} context['nav_active'] = 'posts' post =", "= 'tags' tag = Tag.objects.get(pk=tag_id) tag.delete() messages.success(request, 'Tag deleted.') return", "posts = paginator.page(paginator.num_pages) context['posts'] = posts return render(request, 'backend/posts.html', context)", "context = {} context['nav_active'] = 'posts' post = Post.objects.get(pk=post_id) post.delete()", "def add_tag(request): context = {} context['nav_active'] = 'tags' form =", "'Tag updated.') return HttpResponseRedirect(reverse('user_panel_tags')) context['form'] = form return render(request, 'backend/edit_tag.html',", "{} context['nav_active'] = 'index' return render(request, 'backend/index.html', context) @login_required() def", "form = PostForm(request.POST, request.FILES, instance=post) if form.is_valid(): form.save() messages.success(request, 'Post", "EmptyPage: tags = paginator.page(paginator.num_pages) context['tags'] = tags return render(request, 'backend/tags.html',", "delete_tag(request, tag_id): context = {} context['nav_active'] = 'tags' tag =", "from backend.forms import PostForm, CategoryForm, TagForm # Create your views", "request.GET.get('page') try: categories = paginator.page(page) except PageNotAnInteger: categories = paginator.page(1)", "= Category.objects.get(pk=category_id) category.delete() messages.success(request, 'Category deleted.') return HttpResponseRedirect(reverse('user_panel_categories')) @login_required() def", "= {} context['nav_active'] = 'posts' form = PostForm() if request.method", "= Paginator(list(reversed(post_list)), 10) page = request.GET.get('page') try: posts = paginator.page(page)", "@login_required() def edit_post(request, post_id): context = {} context['nav_active'] = 'posts'", "messages.success(request, 'Category deleted.') return HttpResponseRedirect(reverse('user_panel_categories')) @login_required() def tags(request): context =", "context) @login_required() def posts(request): context = {} context['nav_active'] = 'posts'", "if request.method == 'POST': form = TagForm(request.POST, request.FILES, instance=tag) if", "Post.objects.get(pk=post_id) post.delete() messages.success(request, 'Post deleted.') return HttpResponseRedirect(reverse('user_panel_posts')) @login_required() def categories(request):", "edit_tag(request, tag_id): context = {} context['nav_active'] = 'tags' tag =", "= 'posts' post_list = Post.objects.all() paginator = Paginator(list(reversed(post_list)), 10) page", "PostForm(request.POST, request.FILES, instance=post) if form.is_valid(): form.save() messages.success(request, 'Post updated.') return", "context['form'] = form return render(request, 'backend/edit_category.html', context) @login_required() def edit_category(request,", "if request.method == 'POST': form = CategoryForm(request.POST, 
request.FILES) if form.is_valid():", "Tag.objects.all() paginator = Paginator(list(reversed(tags_list)), 10) page = request.GET.get('page') try: tags", "def delete_category(request, category_id): context = {} context['nav_active'] = 'categories' category", "if form.is_valid(): form.save() messages.success(request, 'Tag updated.') return HttpResponseRedirect(reverse('user_panel_tags')) context['form'] =", "form return render(request, 'backend/edit_post.html', context) @login_required() def edit_post(request, post_id): context", "if request.method == 'POST': form = PostForm(request.POST, request.FILES) if form.is_valid():", "'POST': form = CategoryForm(request.POST, request.FILES, instance=category) if form.is_valid(): form.save() messages.success(request,", "render(request, 'backend/posts.html', context) @login_required() def add_post(request): context = {} context['nav_active']", "request.method == 'POST': form = PostForm(request.POST, request.FILES) if form.is_valid(): form.save()", "form = CategoryForm(instance=category) if request.method == 'POST': form = CategoryForm(request.POST,", "render(request, 'backend/edit_post.html', context) @login_required() def edit_post(request, post_id): context = {}", "'tags' tags_list = Tag.objects.all() paginator = Paginator(list(reversed(tags_list)), 10) page =", "if request.method == 'POST': form = TagForm(request.POST, request.FILES) if form.is_valid():", "messages.success(request, 'Post created.') return HttpResponseRedirect(reverse('user_panel_posts')) context['form'] = form return render(request,", "if request.method == 'POST': form = CategoryForm(request.POST, request.FILES, instance=category) if", "form return render(request, 'backend/edit_post.html', context) @login_required() def delete_post(request, post_id): context", "paginator.page(1) except EmptyPage: tags = paginator.page(paginator.num_pages) context['tags'] = tags return", "form.is_valid(): form.save() messages.success(request, 'Post updated.') return HttpResponseRedirect(reverse('user_panel_posts')) context['form'] = form", "django.core.urlresolvers import reverse from django.shortcuts import render from django.http import", "django.shortcuts import render from django.http import HttpResponseRedirect from core.models import", "@login_required() def posts(request): context = {} context['nav_active'] = 'posts' post_list", "'backend/edit_category.html', context) @login_required() def edit_category(request, category_id): context = {} context['nav_active']", "{} context['nav_active'] = 'tags' tag = Tag.objects.get(pk=tag_id) context['tag'] = tag", "= form return render(request, 'backend/edit_tag.html', context) @login_required() def delete_tag(request, tag_id):", "== 'POST': form = PostForm(request.POST, request.FILES, instance=post) if form.is_valid(): form.save()", "TagForm() if request.method == 'POST': form = TagForm(request.POST, request.FILES) if", "context) @login_required() def add_tag(request): context = {} context['nav_active'] = 'tags'", "= TagForm(instance=tag) if request.method == 'POST': form = TagForm(request.POST, request.FILES,", "= request.GET.get('page') try: posts = paginator.page(page) except PageNotAnInteger: posts =", "= Paginator(list(reversed(categories_list)), 10) page = request.GET.get('page') try: categories = paginator.page(page)", "render(request, 'backend/edit_post.html', context) @login_required() def delete_post(request, post_id): context = {}", "render(request, 'backend/index.html', context) @login_required() def posts(request): context = {} 
context['nav_active']", "HttpResponseRedirect(reverse('user_panel_categories')) @login_required() def tags(request): context = {} context['nav_active'] = 'tags'", "= paginator.page(page) except PageNotAnInteger: tags = paginator.page(1) except EmptyPage: tags", "CategoryForm, TagForm # Create your views here. @login_required() def index(request):", "{} context['nav_active'] = 'categories' categories_list = Category.objects.all() paginator = Paginator(list(reversed(categories_list)),", "= tags return render(request, 'backend/tags.html', context) @login_required() def add_tag(request): context", "'categories' category = Category.objects.get(pk=category_id) context['category'] = category form = CategoryForm(instance=category)", "'categories' form = CategoryForm() if request.method == 'POST': form =", "def tags(request): context = {} context['nav_active'] = 'tags' tags_list =", "= 'posts' form = PostForm() if request.method == 'POST': form", "= 'posts' post = Post.objects.get(pk=post_id) post.delete() messages.success(request, 'Post deleted.') return", "{} context['nav_active'] = 'posts' form = PostForm() if request.method ==", "django.http import HttpResponseRedirect from core.models import Post, Category, Tag from", "except PageNotAnInteger: categories = paginator.page(1) except EmptyPage: categories = paginator.page(paginator.num_pages)", "except EmptyPage: posts = paginator.page(paginator.num_pages) context['posts'] = posts return render(request,", "'categories' categories_list = Category.objects.all() paginator = Paginator(list(reversed(categories_list)), 10) page =", "render(request, 'backend/edit_tag.html', context) @login_required() def delete_tag(request, tag_id): context = {}", "context) @login_required() def delete_tag(request, tag_id): context = {} context['nav_active'] =", "except PageNotAnInteger: tags = paginator.page(1) except EmptyPage: tags = paginator.page(paginator.num_pages)", "categories = paginator.page(paginator.num_pages) context['categories'] = categories return render(request, 'backend/categories.html', context)", "return render(request, 'backend/edit_tag.html', context) @login_required() def edit_tag(request, tag_id): context =", "tag = Tag.objects.get(pk=tag_id) context['tag'] = tag form = TagForm(instance=tag) if", "category.delete() messages.success(request, 'Category deleted.') return HttpResponseRedirect(reverse('user_panel_categories')) @login_required() def tags(request): context", "'Post created.') return HttpResponseRedirect(reverse('user_panel_posts')) context['form'] = form return render(request, 'backend/edit_post.html',", "categories = paginator.page(1) except EmptyPage: categories = paginator.page(paginator.num_pages) context['categories'] =", "tags = paginator.page(1) except EmptyPage: tags = paginator.page(paginator.num_pages) context['tags'] =", "form.is_valid(): form.save() messages.success(request, 'Tag created.') return HttpResponseRedirect(reverse('user_panel_tags')) context['form'] = form", "context['post'] = post form = PostForm(instance=post) if request.method == 'POST':", "= paginator.page(paginator.num_pages) context['tags'] = tags return render(request, 'backend/tags.html', context) @login_required()", "TagForm # Create your views here. 
@login_required() def index(request): context", "if request.method == 'POST': form = PostForm(request.POST, request.FILES, instance=post) if", "= request.GET.get('page') try: tags = paginator.page(page) except PageNotAnInteger: tags =", "= 'posts' post = Post.objects.get(pk=post_id) context['post'] = post form =", "'posts' post = Post.objects.get(pk=post_id) post.delete() messages.success(request, 'Post deleted.') return HttpResponseRedirect(reverse('user_panel_posts'))", "@login_required() def tags(request): context = {} context['nav_active'] = 'tags' tags_list", "HttpResponseRedirect from core.models import Post, Category, Tag from backend.forms import", "def add_post(request): context = {} context['nav_active'] = 'posts' form =", "request.FILES) if form.is_valid(): form.save() messages.success(request, 'Tag created.') return HttpResponseRedirect(reverse('user_panel_tags')) context['form']", "TagForm(instance=tag) if request.method == 'POST': form = TagForm(request.POST, request.FILES, instance=tag)", "'POST': form = TagForm(request.POST, request.FILES) if form.is_valid(): form.save() messages.success(request, 'Tag", "PageNotAnInteger: categories = paginator.page(1) except EmptyPage: categories = paginator.page(paginator.num_pages) context['categories']", "EmptyPage: categories = paginator.page(paginator.num_pages) context['categories'] = categories return render(request, 'backend/categories.html',", "import HttpResponseRedirect from core.models import Post, Category, Tag from backend.forms", "updated.') return HttpResponseRedirect(reverse('user_panel_categories')) context['form'] = form return render(request, 'backend/edit_category.html', context)", "paginator = Paginator(list(reversed(tags_list)), 10) page = request.GET.get('page') try: tags =", "'tags' form = TagForm() if request.method == 'POST': form =", "Paginator(list(reversed(categories_list)), 10) page = request.GET.get('page') try: categories = paginator.page(page) except", "= Tag.objects.all() paginator = Paginator(list(reversed(tags_list)), 10) page = request.GET.get('page') try:", "messages from django.contrib.auth.decorators import login_required from django.core.paginator import Paginator, PageNotAnInteger,", "category = Category.objects.get(pk=category_id) context['category'] = category form = CategoryForm(instance=category) if", "tags_list = Tag.objects.all() paginator = Paginator(list(reversed(tags_list)), 10) page = request.GET.get('page')", "category_id): context = {} context['nav_active'] = 'categories' category = Category.objects.get(pk=category_id)", "'Post updated.') return HttpResponseRedirect(reverse('user_panel_posts')) context['form'] = form return render(request, 'backend/edit_post.html',", "import Post, Category, Tag from backend.forms import PostForm, CategoryForm, TagForm", "page = request.GET.get('page') try: tags = paginator.page(page) except PageNotAnInteger: tags", "= tag form = TagForm(instance=tag) if request.method == 'POST': form", "'backend/edit_tag.html', context) @login_required() def delete_tag(request, tag_id): context = {} context['nav_active']", "from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage from django.core.urlresolvers import reverse", "return HttpResponseRedirect(reverse('user_panel_categories')) context['form'] = form return render(request, 'backend/edit_category.html', context) @login_required()", "# Create your views here. 
@login_required() def index(request): context =", "context['nav_active'] = 'tags' tags_list = Tag.objects.all() paginator = Paginator(list(reversed(tags_list)), 10)", "TagForm(request.POST, request.FILES, instance=tag) if form.is_valid(): form.save() messages.success(request, 'Tag updated.') return", "return render(request, 'backend/edit_post.html', context) @login_required() def edit_post(request, post_id): context =", "except EmptyPage: categories = paginator.page(paginator.num_pages) context['categories'] = categories return render(request,", "form = CategoryForm() if request.method == 'POST': form = CategoryForm(request.POST,", "= paginator.page(1) except EmptyPage: posts = paginator.page(paginator.num_pages) context['posts'] = posts", "def delete_post(request, post_id): context = {} context['nav_active'] = 'posts' post", "form.save() messages.success(request, 'Post updated.') return HttpResponseRedirect(reverse('user_panel_posts')) context['form'] = form return", "context['form'] = form return render(request, 'backend/edit_tag.html', context) @login_required() def edit_tag(request,", "def edit_tag(request, tag_id): context = {} context['nav_active'] = 'tags' tag", "context) @login_required() def edit_post(request, post_id): context = {} context['nav_active'] =", "== 'POST': form = TagForm(request.POST, request.FILES) if form.is_valid(): form.save() messages.success(request,", "form = TagForm(instance=tag) if request.method == 'POST': form = TagForm(request.POST,", "form.is_valid(): form.save() messages.success(request, 'Category created.') return HttpResponseRedirect(reverse('user_panel_categories')) context['form'] = form", "context = {} context['nav_active'] = 'index' return render(request, 'backend/index.html', context)", "post = Post.objects.get(pk=post_id) post.delete() messages.success(request, 'Post deleted.') return HttpResponseRedirect(reverse('user_panel_posts')) @login_required()", "instance=post) if form.is_valid(): form.save() messages.success(request, 'Post updated.') return HttpResponseRedirect(reverse('user_panel_posts')) context['form']", "category = Category.objects.get(pk=category_id) category.delete() messages.success(request, 'Category deleted.') return HttpResponseRedirect(reverse('user_panel_categories')) @login_required()", "'backend/tags.html', context) @login_required() def add_tag(request): context = {} context['nav_active'] =", "PageNotAnInteger: posts = paginator.page(1) except EmptyPage: posts = paginator.page(paginator.num_pages) context['posts']", "context['nav_active'] = 'tags' tag = Tag.objects.get(pk=tag_id) tag.delete() messages.success(request, 'Tag deleted.')", "return render(request, 'backend/index.html', context) @login_required() def posts(request): context = {}", "'categories' category = Category.objects.get(pk=category_id) category.delete() messages.success(request, 'Category deleted.') return HttpResponseRedirect(reverse('user_panel_categories'))", "backend.forms import PostForm, CategoryForm, TagForm # Create your views here.", "= TagForm(request.POST, request.FILES, instance=tag) if form.is_valid(): form.save() messages.success(request, 'Tag updated.')", "def categories(request): context = {} context['nav_active'] = 'categories' categories_list =", "paginator = Paginator(list(reversed(post_list)), 10) page = request.GET.get('page') try: posts =", "@login_required() def delete_post(request, post_id): context = {} context['nav_active'] = 'posts'", "HttpResponseRedirect(reverse('user_panel_tags')) context['form'] = form return render(request, 
'backend/edit_tag.html', context) @login_required() def", "= {} context['nav_active'] = 'categories' category = Category.objects.get(pk=category_id) category.delete() messages.success(request,", "post form = PostForm(instance=post) if request.method == 'POST': form =", "core.models import Post, Category, Tag from backend.forms import PostForm, CategoryForm,", "def posts(request): context = {} context['nav_active'] = 'posts' post_list =", "'Category updated.') return HttpResponseRedirect(reverse('user_panel_categories')) context['form'] = form return render(request, 'backend/edit_category.html',", "= {} context['nav_active'] = 'tags' tag = Tag.objects.get(pk=tag_id) tag.delete() messages.success(request,", "= Post.objects.get(pk=post_id) post.delete() messages.success(request, 'Post deleted.') return HttpResponseRedirect(reverse('user_panel_posts')) @login_required() def", "tags = paginator.page(page) except PageNotAnInteger: tags = paginator.page(1) except EmptyPage:", "post.delete() messages.success(request, 'Post deleted.') return HttpResponseRedirect(reverse('user_panel_posts')) @login_required() def categories(request): context", "form = PostForm() if request.method == 'POST': form = PostForm(request.POST,", "context['form'] = form return render(request, 'backend/edit_post.html', context) @login_required() def edit_post(request,", "= 'categories' category = Category.objects.get(pk=category_id) category.delete() messages.success(request, 'Category deleted.') return", "request.method == 'POST': form = TagForm(request.POST, request.FILES, instance=tag) if form.is_valid():", "= 'categories' category = Category.objects.get(pk=category_id) context['category'] = category form =", "'backend/posts.html', context) @login_required() def add_post(request): context = {} context['nav_active'] =", "= {} context['nav_active'] = 'posts' post = Post.objects.get(pk=post_id) context['post'] =", "if form.is_valid(): form.save() messages.success(request, 'Category created.') return HttpResponseRedirect(reverse('user_panel_categories')) context['form'] =", "Post.objects.get(pk=post_id) context['post'] = post form = PostForm(instance=post) if request.method ==", "context['form'] = form return render(request, 'backend/edit_tag.html', context) @login_required() def delete_tag(request,", "EmptyPage from django.core.urlresolvers import reverse from django.shortcuts import render from", "= Post.objects.all() paginator = Paginator(list(reversed(post_list)), 10) page = request.GET.get('page') try:", "context) @login_required() def edit_tag(request, tag_id): context = {} context['nav_active'] =", "messages.success(request, 'Tag updated.') return HttpResponseRedirect(reverse('user_panel_tags')) context['form'] = form return render(request,", "= paginator.page(page) except PageNotAnInteger: categories = paginator.page(1) except EmptyPage: categories", "views here. 
@login_required() def index(request): context = {} context['nav_active'] =", "= PostForm(instance=post) if request.method == 'POST': form = PostForm(request.POST, request.FILES,", "'backend/categories.html', context) @login_required() def add_category(request): context = {} context['nav_active'] =", "'backend/edit_tag.html', context) @login_required() def edit_tag(request, tag_id): context = {} context['nav_active']", "10) page = request.GET.get('page') try: categories = paginator.page(page) except PageNotAnInteger:", "'backend/edit_post.html', context) @login_required() def delete_post(request, post_id): context = {} context['nav_active']", "context['nav_active'] = 'categories' form = CategoryForm() if request.method == 'POST':", "{} context['nav_active'] = 'posts' post_list = Post.objects.all() paginator = Paginator(list(reversed(post_list)),", "'tags' tag = Tag.objects.get(pk=tag_id) tag.delete() messages.success(request, 'Tag deleted.') return HttpResponseRedirect(reverse('user_panel_tags'))", "= {} context['nav_active'] = 'categories' category = Category.objects.get(pk=category_id) context['category'] =", "try: tags = paginator.page(page) except PageNotAnInteger: tags = paginator.page(1) except", "context = {} context['nav_active'] = 'tags' tags_list = Tag.objects.all() paginator", "render(request, 'backend/tags.html', context) @login_required() def add_tag(request): context = {} context['nav_active']", "from django.shortcuts import render from django.http import HttpResponseRedirect from core.models", "Create your views here. @login_required() def index(request): context = {}", "CategoryForm() if request.method == 'POST': form = CategoryForm(request.POST, request.FILES) if", "Paginator(list(reversed(tags_list)), 10) page = request.GET.get('page') try: tags = paginator.page(page) except", "form return render(request, 'backend/edit_category.html', context) @login_required() def delete_category(request, category_id): context", "from django.core.urlresolvers import reverse from django.shortcuts import render from django.http", "login_required from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage from django.core.urlresolvers import", "{} context['nav_active'] = 'categories' category = Category.objects.get(pk=category_id) context['category'] = category", "Category.objects.all() paginator = Paginator(list(reversed(categories_list)), 10) page = request.GET.get('page') try: categories", "return render(request, 'backend/categories.html', context) @login_required() def add_category(request): context = {}", "page = request.GET.get('page') try: posts = paginator.page(page) except PageNotAnInteger: posts", "form = TagForm(request.POST, request.FILES, instance=tag) if form.is_valid(): form.save() messages.success(request, 'Tag", "= category form = CategoryForm(instance=category) if request.method == 'POST': form", "Tag.objects.get(pk=tag_id) context['tag'] = tag form = TagForm(instance=tag) if request.method ==", "Category, Tag from backend.forms import PostForm, CategoryForm, TagForm # Create", "{} context['nav_active'] = 'tags' tag = Tag.objects.get(pk=tag_id) tag.delete() messages.success(request, 'Tag", "= Category.objects.get(pk=category_id) context['category'] = category form = CategoryForm(instance=category) if request.method", "post_id): context = {} context['nav_active'] = 'posts' post = Post.objects.get(pk=post_id)", "context = {} context['nav_active'] = 'posts' post_list = Post.objects.all() paginator", "context['tag'] = tag form = TagForm(instance=tag) if 
request.method == 'POST':", "paginator.page(paginator.num_pages) context['categories'] = categories return render(request, 'backend/categories.html', context) @login_required() def", "messages.success(request, 'Tag created.') return HttpResponseRedirect(reverse('user_panel_tags')) context['form'] = form return render(request,", "import Paginator, PageNotAnInteger, EmptyPage from django.core.urlresolvers import reverse from django.shortcuts", "form = CategoryForm(request.POST, request.FILES) if form.is_valid(): form.save() messages.success(request, 'Category created.')", "return render(request, 'backend/edit_category.html', context) @login_required() def delete_category(request, category_id): context =", "= form return render(request, 'backend/edit_post.html', context) @login_required() def edit_post(request, post_id):", "= Paginator(list(reversed(tags_list)), 10) page = request.GET.get('page') try: tags = paginator.page(page)", "django.core.paginator import Paginator, PageNotAnInteger, EmptyPage from django.core.urlresolvers import reverse from", "== 'POST': form = CategoryForm(request.POST, request.FILES, instance=category) if form.is_valid(): form.save()", "'Category created.') return HttpResponseRedirect(reverse('user_panel_categories')) context['form'] = form return render(request, 'backend/edit_category.html',", "context['nav_active'] = 'tags' tag = Tag.objects.get(pk=tag_id) context['tag'] = tag form", "reverse from django.shortcuts import render from django.http import HttpResponseRedirect from", "context['nav_active'] = 'index' return render(request, 'backend/index.html', context) @login_required() def posts(request):", "'POST': form = CategoryForm(request.POST, request.FILES) if form.is_valid(): form.save() messages.success(request, 'Category", "== 'POST': form = CategoryForm(request.POST, request.FILES) if form.is_valid(): form.save() messages.success(request,", "= form return render(request, 'backend/edit_category.html', context) @login_required() def delete_category(request, category_id):", "def index(request): context = {} context['nav_active'] = 'index' return render(request,", "{} context['nav_active'] = 'categories' form = CategoryForm() if request.method ==", "context = {} context['nav_active'] = 'categories' category = Category.objects.get(pk=category_id) context['category']", "request.FILES) if form.is_valid(): form.save() messages.success(request, 'Post created.') return HttpResponseRedirect(reverse('user_panel_posts')) context['form']", "context['nav_active'] = 'posts' post = Post.objects.get(pk=post_id) post.delete() messages.success(request, 'Post deleted.')", "form = CategoryForm(request.POST, request.FILES, instance=category) if form.is_valid(): form.save() messages.success(request, 'Category", "paginator.page(page) except PageNotAnInteger: categories = paginator.page(1) except EmptyPage: categories =", "'posts' post = Post.objects.get(pk=post_id) context['post'] = post form = PostForm(instance=post)", "= Post.objects.get(pk=post_id) context['post'] = post form = PostForm(instance=post) if request.method", "= paginator.page(paginator.num_pages) context['categories'] = categories return render(request, 'backend/categories.html', context) @login_required()", "request.method == 'POST': form = TagForm(request.POST, request.FILES) if form.is_valid(): form.save()", "render(request, 'backend/edit_category.html', context) @login_required() def delete_category(request, category_id): context = {}", "@login_required() def edit_category(request, category_id): context = {} 
context['nav_active'] = 'categories'", "edit_category(request, category_id): context = {} context['nav_active'] = 'categories' category =", "import messages from django.contrib.auth.decorators import login_required from django.core.paginator import Paginator,", "request.method == 'POST': form = CategoryForm(request.POST, request.FILES) if form.is_valid(): form.save()", "context) @login_required() def delete_category(request, category_id): context = {} context['nav_active'] =", "@login_required() def delete_category(request, category_id): context = {} context['nav_active'] = 'categories'", "return render(request, 'backend/edit_post.html', context) @login_required() def delete_post(request, post_id): context =", "= {} context['nav_active'] = 'tags' tags_list = Tag.objects.all() paginator =", "= paginator.page(page) except PageNotAnInteger: posts = paginator.page(1) except EmptyPage: posts", "if form.is_valid(): form.save() messages.success(request, 'Post created.') return HttpResponseRedirect(reverse('user_panel_posts')) context['form'] =", "return HttpResponseRedirect(reverse('user_panel_posts')) context['form'] = form return render(request, 'backend/edit_post.html', context) @login_required()", "form.is_valid(): form.save() messages.success(request, 'Tag updated.') return HttpResponseRedirect(reverse('user_panel_tags')) context['form'] = form", "context['nav_active'] = 'categories' categories_list = Category.objects.all() paginator = Paginator(list(reversed(categories_list)), 10)", "= paginator.page(1) except EmptyPage: categories = paginator.page(paginator.num_pages) context['categories'] = categories", "except PageNotAnInteger: posts = paginator.page(1) except EmptyPage: posts = paginator.page(paginator.num_pages)", "from core.models import Post, Category, Tag from backend.forms import PostForm,", "django.contrib.auth.decorators import login_required from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage from", "= {} context['nav_active'] = 'categories' categories_list = Category.objects.all() paginator =", "deleted.') return HttpResponseRedirect(reverse('user_panel_posts')) @login_required() def categories(request): context = {} context['nav_active']", "category form = CategoryForm(instance=category) if request.method == 'POST': form =", "add_post(request): context = {} context['nav_active'] = 'posts' form = PostForm()", "form.save() messages.success(request, 'Category created.') return HttpResponseRedirect(reverse('user_panel_categories')) context['form'] = form return", "'Category deleted.') return HttpResponseRedirect(reverse('user_panel_categories')) @login_required() def tags(request): context = {}", "categories_list = Category.objects.all() paginator = Paginator(list(reversed(categories_list)), 10) page = request.GET.get('page')", "form.save() messages.success(request, 'Post created.') return HttpResponseRedirect(reverse('user_panel_posts')) context['form'] = form return", "{} context['nav_active'] = 'tags' form = TagForm() if request.method ==", "= post form = PostForm(instance=post) if request.method == 'POST': form", "@login_required() def index(request): context = {} context['nav_active'] = 'index' return", "CategoryForm(request.POST, request.FILES, instance=category) if form.is_valid(): form.save() messages.success(request, 'Category updated.') return", "except EmptyPage: tags = paginator.page(paginator.num_pages) context['tags'] = tags return render(request,", "def edit_post(request, post_id): context = {} context['nav_active'] = 'posts' post", 
"@login_required() def add_post(request): context = {} context['nav_active'] = 'posts' form", "render(request, 'backend/edit_category.html', context) @login_required() def edit_category(request, category_id): context = {}", "EmptyPage: posts = paginator.page(paginator.num_pages) context['posts'] = posts return render(request, 'backend/posts.html',", "post_list = Post.objects.all() paginator = Paginator(list(reversed(post_list)), 10) page = request.GET.get('page')", "from django.http import HttpResponseRedirect from core.models import Post, Category, Tag", "tag form = TagForm(instance=tag) if request.method == 'POST': form =", "== 'POST': form = TagForm(request.POST, request.FILES, instance=tag) if form.is_valid(): form.save()", "context = {} context['nav_active'] = 'posts' post = Post.objects.get(pk=post_id) context['post']", "add_tag(request): context = {} context['nav_active'] = 'tags' form = TagForm()", "updated.') return HttpResponseRedirect(reverse('user_panel_tags')) context['form'] = form return render(request, 'backend/edit_tag.html', context)", "render(request, 'backend/edit_tag.html', context) @login_required() def edit_tag(request, tag_id): context = {}", "@login_required() def delete_tag(request, tag_id): context = {} context['nav_active'] = 'tags'", "'POST': form = PostForm(request.POST, request.FILES, instance=post) if form.is_valid(): form.save() messages.success(request,", "@login_required() def categories(request): context = {} context['nav_active'] = 'categories' categories_list", "context = {} context['nav_active'] = 'categories' categories_list = Category.objects.all() paginator", "= request.GET.get('page') try: categories = paginator.page(page) except PageNotAnInteger: categories =", "context['form'] = form return render(request, 'backend/edit_post.html', context) @login_required() def delete_post(request,", "= {} context['nav_active'] = 'posts' post_list = Post.objects.all() paginator =", "paginator.page(paginator.num_pages) context['tags'] = tags return render(request, 'backend/tags.html', context) @login_required() def", "Post, Category, Tag from backend.forms import PostForm, CategoryForm, TagForm #", "return render(request, 'backend/posts.html', context) @login_required() def add_post(request): context = {}", "context = {} context['nav_active'] = 'categories' form = CategoryForm() if", "messages.success(request, 'Category created.') return HttpResponseRedirect(reverse('user_panel_categories')) context['form'] = form return render(request,", "CategoryForm(instance=category) if request.method == 'POST': form = CategoryForm(request.POST, request.FILES, instance=category)", "delete_category(request, category_id): context = {} context['nav_active'] = 'categories' category =", "Tag from backend.forms import PostForm, CategoryForm, TagForm # Create your", "context = {} context['nav_active'] = 'tags' tag = Tag.objects.get(pk=tag_id) context['tag']", "instance=tag) if form.is_valid(): form.save() messages.success(request, 'Tag updated.') return HttpResponseRedirect(reverse('user_panel_tags')) context['form']", "== 'POST': form = PostForm(request.POST, request.FILES) if form.is_valid(): form.save() messages.success(request,", "= form return render(request, 'backend/edit_post.html', context) @login_required() def delete_post(request, post_id):", "@login_required() def add_tag(request): context = {} context['nav_active'] = 'tags' form", "PostForm(instance=post) if request.method == 'POST': form = PostForm(request.POST, request.FILES, instance=post)", "= 'categories' form = 
CategoryForm() if request.method == 'POST': form", "paginator.page(page) except PageNotAnInteger: posts = paginator.page(1) except EmptyPage: posts =", "context['nav_active'] = 'tags' form = TagForm() if request.method == 'POST':", "paginator.page(paginator.num_pages) context['posts'] = posts return render(request, 'backend/posts.html', context) @login_required() def", "your views here. @login_required() def index(request): context = {} context['nav_active']", "= paginator.page(paginator.num_pages) context['posts'] = posts return render(request, 'backend/posts.html', context) @login_required()", "messages.success(request, 'Category updated.') return HttpResponseRedirect(reverse('user_panel_categories')) context['form'] = form return render(request,", "Paginator(list(reversed(post_list)), 10) page = request.GET.get('page') try: posts = paginator.page(page) except", "'index' return render(request, 'backend/index.html', context) @login_required() def posts(request): context =", "posts = paginator.page(page) except PageNotAnInteger: posts = paginator.page(1) except EmptyPage:", "request.FILES) if form.is_valid(): form.save() messages.success(request, 'Category created.') return HttpResponseRedirect(reverse('user_panel_categories')) context['form']", "Post.objects.all() paginator = Paginator(list(reversed(post_list)), 10) page = request.GET.get('page') try: posts", "= form return render(request, 'backend/edit_tag.html', context) @login_required() def edit_tag(request, tag_id):", "context['nav_active'] = 'posts' post = Post.objects.get(pk=post_id) context['post'] = post form", "context['nav_active'] = 'categories' category = Category.objects.get(pk=category_id) category.delete() messages.success(request, 'Category deleted.')", "context = {} context['nav_active'] = 'posts' form = PostForm() if", "posts(request): context = {} context['nav_active'] = 'posts' post_list = Post.objects.all()", "render from django.http import HttpResponseRedirect from core.models import Post, Category,", "from django.contrib.auth.decorators import login_required from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage", "form return render(request, 'backend/edit_tag.html', context) @login_required() def delete_tag(request, tag_id): context", "created.') return HttpResponseRedirect(reverse('user_panel_categories')) context['form'] = form return render(request, 'backend/edit_category.html', context)", "request.FILES, instance=tag) if form.is_valid(): form.save() messages.success(request, 'Tag updated.') return HttpResponseRedirect(reverse('user_panel_tags'))", "def add_category(request): context = {} context['nav_active'] = 'categories' form =", "= form return render(request, 'backend/edit_category.html', context) @login_required() def edit_category(request, category_id):", "import PostForm, CategoryForm, TagForm # Create your views here. 
@login_required()", "return render(request, 'backend/edit_category.html', context) @login_required() def edit_category(request, category_id): context =", "context['form'] = form return render(request, 'backend/edit_category.html', context) @login_required() def delete_category(request,", "@login_required() def add_category(request): context = {} context['nav_active'] = 'categories' form", "'POST': form = PostForm(request.POST, request.FILES) if form.is_valid(): form.save() messages.success(request, 'Post", "= PostForm() if request.method == 'POST': form = PostForm(request.POST, request.FILES)", "= {} context['nav_active'] = 'posts' post = Post.objects.get(pk=post_id) post.delete() messages.success(request,", "return HttpResponseRedirect(reverse('user_panel_categories')) @login_required() def tags(request): context = {} context['nav_active'] =", "form.is_valid(): form.save() messages.success(request, 'Post created.') return HttpResponseRedirect(reverse('user_panel_posts')) context['form'] = form", "return render(request, 'backend/tags.html', context) @login_required() def add_tag(request): context = {}", "context['nav_active'] = 'posts' form = PostForm() if request.method == 'POST':", "HttpResponseRedirect(reverse('user_panel_posts')) context['form'] = form return render(request, 'backend/edit_post.html', context) @login_required() def", "Paginator, PageNotAnInteger, EmptyPage from django.core.urlresolvers import reverse from django.shortcuts import", "categories return render(request, 'backend/categories.html', context) @login_required() def add_category(request): context =", "instance=category) if form.is_valid(): form.save() messages.success(request, 'Category updated.') return HttpResponseRedirect(reverse('user_panel_categories')) context['form']", "context) @login_required() def delete_post(request, post_id): context = {} context['nav_active'] =", "def edit_category(request, category_id): context = {} context['nav_active'] = 'categories' category", "deleted.') return HttpResponseRedirect(reverse('user_panel_categories')) @login_required() def tags(request): context = {} context['nav_active']", "try: categories = paginator.page(page) except PageNotAnInteger: categories = paginator.page(1) except", "if form.is_valid(): form.save() messages.success(request, 'Tag created.') return HttpResponseRedirect(reverse('user_panel_tags')) context['form'] =", "context['category'] = category form = CategoryForm(instance=category) if request.method == 'POST':", "delete_post(request, post_id): context = {} context['nav_active'] = 'posts' post =", "= paginator.page(1) except EmptyPage: tags = paginator.page(paginator.num_pages) context['tags'] = tags", "request.FILES, instance=post) if form.is_valid(): form.save() messages.success(request, 'Post updated.') return HttpResponseRedirect(reverse('user_panel_posts'))", "tags(request): context = {} context['nav_active'] = 'tags' tags_list = Tag.objects.all()", "PostForm() if request.method == 'POST': form = PostForm(request.POST, request.FILES) if", "= categories return render(request, 'backend/categories.html', context) @login_required() def add_category(request): context", "paginator.page(1) except EmptyPage: posts = paginator.page(paginator.num_pages) context['posts'] = posts return", "if form.is_valid(): form.save() messages.success(request, 'Post updated.') return HttpResponseRedirect(reverse('user_panel_posts')) context['form'] =", "page = request.GET.get('page') try: categories = paginator.page(page) except PageNotAnInteger: categories", "context) 
@login_required() def add_post(request): context = {} context['nav_active'] = 'posts'", "'posts' form = PostForm() if request.method == 'POST': form =", "messages.success(request, 'Post deleted.') return HttpResponseRedirect(reverse('user_panel_posts')) @login_required() def categories(request): context =", "= {} context['nav_active'] = 'tags' form = TagForm() if request.method", "{} context['nav_active'] = 'posts' post = Post.objects.get(pk=post_id) post.delete() messages.success(request, 'Post", "= Category.objects.all() paginator = Paginator(list(reversed(categories_list)), 10) page = request.GET.get('page') try:", "updated.') return HttpResponseRedirect(reverse('user_panel_posts')) context['form'] = form return render(request, 'backend/edit_post.html', context)", "{} context['nav_active'] = 'tags' tags_list = Tag.objects.all() paginator = Paginator(list(reversed(tags_list)),", "'Tag created.') return HttpResponseRedirect(reverse('user_panel_tags')) context['form'] = form return render(request, 'backend/edit_tag.html',", "request.method == 'POST': form = PostForm(request.POST, request.FILES, instance=post) if form.is_valid():", "'backend/edit_post.html', context) @login_required() def edit_post(request, post_id): context = {} context['nav_active']" ]
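The redirects in the views above resolve named URL patterns ('user_panel_posts', 'user_panel_categories', 'user_panel_tags') via reverse(). As a rough, non-authoritative illustration, a URLconf along the following lines would satisfy those reverse() calls; the regex patterns, the additional route names, and the backend.views module path are assumptions made for this sketch, not taken from the project.

# Hypothetical URLconf sketch: only the names used by reverse() in the views
# above are grounded; every pattern and the remaining names are assumptions.
from django.conf.urls import url

from backend import views

urlpatterns = [
    url(r'^$', views.index, name='user_panel_index'),  # assumed name
    url(r'^posts/$', views.posts, name='user_panel_posts'),
    url(r'^posts/add/$', views.add_post, name='user_panel_add_post'),  # assumed name
    url(r'^posts/(?P<post_id>\d+)/edit/$', views.edit_post, name='user_panel_edit_post'),  # assumed name
    url(r'^posts/(?P<post_id>\d+)/delete/$', views.delete_post, name='user_panel_delete_post'),  # assumed name
    url(r'^categories/$', views.categories, name='user_panel_categories'),
    url(r'^tags/$', views.tags, name='user_panel_tags'),
]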
[ ") -> Tuple[_mp.Process, IRPCModelSession]: client_conn, server_conn = _mp.Pipe() proc =", "as _mp import uuid import zipfile from concurrent.futures import Future", "= uuid.uuid4().hex self._datasets[id_] = {\"mean\": mean, \"stddev\": stddev} return id_", "res def create_dataset(self, mean, stddev): id_ = uuid.uuid4().hex self._datasets[id_] =", "import List, Optional, Tuple import numpy from tiktorch import log", "_mp_rpc from tiktorch.rpc.mp import MPServer from tiktorch.server.reader import eval_model_zip from", "forward(self, input_tensor: numpy.ndarray) -> Future: res = self._worker.forward(input_tensor) return res", "Future: res = self._worker.forward(input_tensor) return res def create_dataset(self, mean, stddev):", "devices: List[str], log_queue: Optional[_mp.Queue] = None ): try: # from:", "on windows if log_queue: log.configure(log_queue) session_proc = ModelSessionProcess(model_zip, devices) srv", "ModelSessionProcess(model_zip, devices) srv = MPServer(session_proc, conn) srv.listen() def start_model_session_process( model_zip:", "multiprocessing as _mp import uuid import zipfile from concurrent.futures import", "Tuple import numpy from tiktorch import log from tiktorch.rpc import", "List[Tuple[str, float]] class ModelSessionProcess(IRPCModelSession): def __init__(self, model_zip: bytes, devices: List[str])", "IRPCModelSession]: client_conn, server_conn = _mp.Pipe() proc = _mp.Process( target=_run_model_session_process, name=\"ModelSessionProcess\",", "self._model.name, self._model.input_axes, self._model.output_axes, valid_shapes=[self._model.input_shape], halo=self._model.halo, scale=self._model.scale, offset=self._model.offset, ) def shutdown(self)", "proc = _mp.Process( target=_run_model_session_process, name=\"ModelSessionProcess\", kwargs={\"conn\": server_conn, \"devices\": devices, \"log_queue\":", "List[str]) -> None: with zipfile.ZipFile(io.BytesIO(model_zip)) as model_file: self._model = eval_model_zip(model_file,", "log from tiktorch.rpc import Shutdown from tiktorch.rpc import mp as", "import IRPCModelSession @dataclasses.dataclass class ModelInfo: # TODO: Test for model", "List[Tuple[str, int]] offset: List[Tuple[str, int]] scale: List[Tuple[str, float]] class ModelSessionProcess(IRPCModelSession):", "as model_file: self._model = eval_model_zip(model_file, devices) self._datasets = {} self._worker", "def create_dataset(self, mean, stddev): id_ = uuid.uuid4().hex self._datasets[id_] = {\"mean\":", "Optional[_mp.Queue] = None ) -> Tuple[_mp.Process, IRPCModelSession]: client_conn, server_conn =", "ModuleNotFoundError: pass # probably running on windows if log_queue: log.configure(log_queue)", "stddev} return id_ def get_model_info(self) -> ModelInfo: return ModelInfo( self._model.name,", "as _mp_rpc from tiktorch.rpc.mp import MPServer from tiktorch.server.reader import eval_model_zip", "halo=self._model.halo, scale=self._model.scale, offset=self._model.offset, ) def shutdown(self) -> Shutdown: self._worker.shutdown() return", "# from: https://github.com/pytorch/pytorch/issues/973#issuecomment-346405667 import resource rlimit = resource.getrlimit(resource.RLIMIT_NOFILE) resource.setrlimit(resource.RLIMIT_NOFILE, (4096,", "Shutdown from tiktorch.rpc import mp as _mp_rpc from tiktorch.rpc.mp import", "Future from multiprocessing.connection import Connection from typing import List, Optional,", "_run_model_session_process( conn: Connection, model_zip: bytes, devices: List[str], log_queue: Optional[_mp.Queue] =", "offset=self._model.offset, ) def shutdown(self) -> 
Shutdown: self._worker.shutdown() return Shutdown() def", ".rpc_interface import IRPCModelSession @dataclasses.dataclass class ModelInfo: # TODO: Test for", "bytes, devices: List[str], log_queue: Optional[_mp.Queue] = None ): try: #", "Tuple[_mp.Process, IRPCModelSession]: client_conn, server_conn = _mp.Pipe() proc = _mp.Process( target=_run_model_session_process,", "{} self._worker = base.SessionBackend(self._model) def forward(self, input_tensor: numpy.ndarray) -> Future:", "server_conn, \"devices\": devices, \"log_queue\": log_queue, \"model_zip\": model_zip}, ) proc.start() return", "-> Tuple[_mp.Process, IRPCModelSession]: client_conn, server_conn = _mp.Pipe() proc = _mp.Process(", "IRPCModelSession @dataclasses.dataclass class ModelInfo: # TODO: Test for model info", "self._model.input_axes, self._model.output_axes, valid_shapes=[self._model.input_shape], halo=self._model.halo, scale=self._model.scale, offset=self._model.offset, ) def shutdown(self) ->", "info name: str input_axes: str output_axes: str valid_shapes: List[List[Tuple[str, int]]]", "model_zip: bytes, devices: List[str], log_queue: Optional[_mp.Queue] = None ) ->", "Shutdown() def _run_model_session_process( conn: Connection, model_zip: bytes, devices: List[str], log_queue:", "TODO: Test for model info name: str input_axes: str output_axes:", "base.SessionBackend(self._model) def forward(self, input_tensor: numpy.ndarray) -> Future: res = self._worker.forward(input_tensor)", "pass # probably running on windows if log_queue: log.configure(log_queue) session_proc", "devices) srv = MPServer(session_proc, conn) srv.listen() def start_model_session_process( model_zip: bytes,", "def _run_model_session_process( conn: Connection, model_zip: bytes, devices: List[str], log_queue: Optional[_mp.Queue]", "= {\"mean\": mean, \"stddev\": stddev} return id_ def get_model_info(self) ->", "import resource rlimit = resource.getrlimit(resource.RLIMIT_NOFILE) resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1])) except ModuleNotFoundError:", "@dataclasses.dataclass class ModelInfo: # TODO: Test for model info name:", "import Future from multiprocessing.connection import Connection from typing import List,", "ModelInfo: return ModelInfo( self._model.name, self._model.input_axes, self._model.output_axes, valid_shapes=[self._model.input_shape], halo=self._model.halo, scale=self._model.scale, offset=self._model.offset,", "Connection, model_zip: bytes, devices: List[str], log_queue: Optional[_mp.Queue] = None ):", "_mp.Pipe() proc = _mp.Process( target=_run_model_session_process, name=\"ModelSessionProcess\", kwargs={\"conn\": server_conn, \"devices\": devices,", "self._worker.shutdown() return Shutdown() def _run_model_session_process( conn: Connection, model_zip: bytes, devices:", "\"stddev\": stddev} return id_ def get_model_info(self) -> ModelInfo: return ModelInfo(", "mean, \"stddev\": stddev} return id_ def get_model_info(self) -> ModelInfo: return", "List[List[Tuple[str, int]]] halo: List[Tuple[str, int]] offset: List[Tuple[str, int]] scale: List[Tuple[str,", "devices: List[str]) -> None: with zipfile.ZipFile(io.BytesIO(model_zip)) as model_file: self._model =", "srv = MPServer(session_proc, conn) srv.listen() def start_model_session_process( model_zip: bytes, devices:", "https://github.com/pytorch/pytorch/issues/973#issuecomment-346405667 import resource rlimit = resource.getrlimit(resource.RLIMIT_NOFILE) resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1])) except", "start_model_session_process( model_zip: bytes, devices: 
List[str], log_queue: Optional[_mp.Queue] = None )", "class ModelSessionProcess(IRPCModelSession): def __init__(self, model_zip: bytes, devices: List[str]) -> None:", "tiktorch.rpc import Shutdown from tiktorch.rpc import mp as _mp_rpc from", "with zipfile.ZipFile(io.BytesIO(model_zip)) as model_file: self._model = eval_model_zip(model_file, devices) self._datasets =", "= {} self._worker = base.SessionBackend(self._model) def forward(self, input_tensor: numpy.ndarray) ->", "id_ def get_model_info(self) -> ModelInfo: return ModelInfo( self._model.name, self._model.input_axes, self._model.output_axes,", "{\"mean\": mean, \"stddev\": stddev} return id_ def get_model_info(self) -> ModelInfo:", "import multiprocessing as _mp import uuid import zipfile from concurrent.futures", "eval_model_zip from .backend import base from .rpc_interface import IRPCModelSession @dataclasses.dataclass", "= MPServer(session_proc, conn) srv.listen() def start_model_session_process( model_zip: bytes, devices: List[str],", "ModelInfo: # TODO: Test for model info name: str input_axes:", "tiktorch.rpc import mp as _mp_rpc from tiktorch.rpc.mp import MPServer from", "self._model.output_axes, valid_shapes=[self._model.input_shape], halo=self._model.halo, scale=self._model.scale, offset=self._model.offset, ) def shutdown(self) -> Shutdown:", "except ModuleNotFoundError: pass # probably running on windows if log_queue:", "log_queue: log.configure(log_queue) session_proc = ModelSessionProcess(model_zip, devices) srv = MPServer(session_proc, conn)", "conn) srv.listen() def start_model_session_process( model_zip: bytes, devices: List[str], log_queue: Optional[_mp.Queue]", "windows if log_queue: log.configure(log_queue) session_proc = ModelSessionProcess(model_zip, devices) srv =", "output_axes: str valid_shapes: List[List[Tuple[str, int]]] halo: List[Tuple[str, int]] offset: List[Tuple[str,", "import base from .rpc_interface import IRPCModelSession @dataclasses.dataclass class ModelInfo: #", "__init__(self, model_zip: bytes, devices: List[str]) -> None: with zipfile.ZipFile(io.BytesIO(model_zip)) as", "float]] class ModelSessionProcess(IRPCModelSession): def __init__(self, model_zip: bytes, devices: List[str]) ->", "from typing import List, Optional, Tuple import numpy from tiktorch", "model_file: self._model = eval_model_zip(model_file, devices) self._datasets = {} self._worker =", "zipfile from concurrent.futures import Future from multiprocessing.connection import Connection from", "id_ = uuid.uuid4().hex self._datasets[id_] = {\"mean\": mean, \"stddev\": stddev} return", "# probably running on windows if log_queue: log.configure(log_queue) session_proc =", "log.configure(log_queue) session_proc = ModelSessionProcess(model_zip, devices) srv = MPServer(session_proc, conn) srv.listen()", "uuid.uuid4().hex self._datasets[id_] = {\"mean\": mean, \"stddev\": stddev} return id_ def", "# TODO: Test for model info name: str input_axes: str", "class ModelInfo: # TODO: Test for model info name: str", "import Shutdown from tiktorch.rpc import mp as _mp_rpc from tiktorch.rpc.mp", "scale: List[Tuple[str, float]] class ModelSessionProcess(IRPCModelSession): def __init__(self, model_zip: bytes, devices:", "_mp import uuid import zipfile from concurrent.futures import Future from", "def shutdown(self) -> Shutdown: self._worker.shutdown() return Shutdown() def _run_model_session_process( conn:", "None ): try: # from: https://github.com/pytorch/pytorch/issues/973#issuecomment-346405667 import resource rlimit =", "from tiktorch.rpc.mp 
import MPServer from tiktorch.server.reader import eval_model_zip from .backend", "def forward(self, input_tensor: numpy.ndarray) -> Future: res = self._worker.forward(input_tensor) return", "List, Optional, Tuple import numpy from tiktorch import log from", "return id_ def get_model_info(self) -> ModelInfo: return ModelInfo( self._model.name, self._model.input_axes,", "-> Future: res = self._worker.forward(input_tensor) return res def create_dataset(self, mean,", "tiktorch.rpc.mp import MPServer from tiktorch.server.reader import eval_model_zip from .backend import", "model info name: str input_axes: str output_axes: str valid_shapes: List[List[Tuple[str,", "from multiprocessing.connection import Connection from typing import List, Optional, Tuple", "ModelSessionProcess(IRPCModelSession): def __init__(self, model_zip: bytes, devices: List[str]) -> None: with", "halo: List[Tuple[str, int]] offset: List[Tuple[str, int]] scale: List[Tuple[str, float]] class", "zipfile.ZipFile(io.BytesIO(model_zip)) as model_file: self._model = eval_model_zip(model_file, devices) self._datasets = {}", "= self._worker.forward(input_tensor) return res def create_dataset(self, mean, stddev): id_ =", "import uuid import zipfile from concurrent.futures import Future from multiprocessing.connection", "input_tensor: numpy.ndarray) -> Future: res = self._worker.forward(input_tensor) return res def", "input_axes: str output_axes: str valid_shapes: List[List[Tuple[str, int]]] halo: List[Tuple[str, int]]", "model_zip: bytes, devices: List[str]) -> None: with zipfile.ZipFile(io.BytesIO(model_zip)) as model_file:", "self._worker = base.SessionBackend(self._model) def forward(self, input_tensor: numpy.ndarray) -> Future: res", "concurrent.futures import Future from multiprocessing.connection import Connection from typing import", "session_proc = ModelSessionProcess(model_zip, devices) srv = MPServer(session_proc, conn) srv.listen() def", "ModelInfo( self._model.name, self._model.input_axes, self._model.output_axes, valid_shapes=[self._model.input_shape], halo=self._model.halo, scale=self._model.scale, offset=self._model.offset, ) def", "client_conn, server_conn = _mp.Pipe() proc = _mp.Process( target=_run_model_session_process, name=\"ModelSessionProcess\", kwargs={\"conn\":", "import io import multiprocessing as _mp import uuid import zipfile", "MPServer from tiktorch.server.reader import eval_model_zip from .backend import base from", "from .rpc_interface import IRPCModelSession @dataclasses.dataclass class ModelInfo: # TODO: Test", "rlimit[1])) except ModuleNotFoundError: pass # probably running on windows if", "probably running on windows if log_queue: log.configure(log_queue) session_proc = ModelSessionProcess(model_zip,", "mean, stddev): id_ = uuid.uuid4().hex self._datasets[id_] = {\"mean\": mean, \"stddev\":", "valid_shapes=[self._model.input_shape], halo=self._model.halo, scale=self._model.scale, offset=self._model.offset, ) def shutdown(self) -> Shutdown: self._worker.shutdown()", "= base.SessionBackend(self._model) def forward(self, input_tensor: numpy.ndarray) -> Future: res =", "for model info name: str input_axes: str output_axes: str valid_shapes:", "_mp.Process( target=_run_model_session_process, name=\"ModelSessionProcess\", kwargs={\"conn\": server_conn, \"devices\": devices, \"log_queue\": log_queue, \"model_zip\":", "valid_shapes: List[List[Tuple[str, int]]] halo: List[Tuple[str, int]] offset: List[Tuple[str, int]] scale:", "bytes, devices: List[str], log_queue: Optional[_mp.Queue] = None ) -> 
Tuple[_mp.Process,", "-> None: with zipfile.ZipFile(io.BytesIO(model_zip)) as model_file: self._model = eval_model_zip(model_file, devices)", "\"devices\": devices, \"log_queue\": log_queue, \"model_zip\": model_zip}, ) proc.start() return proc,", "resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1])) except ModuleNotFoundError: pass # probably running on", "List[Tuple[str, int]] scale: List[Tuple[str, float]] class ModelSessionProcess(IRPCModelSession): def __init__(self, model_zip:", "create_dataset(self, mean, stddev): id_ = uuid.uuid4().hex self._datasets[id_] = {\"mean\": mean,", ") def shutdown(self) -> Shutdown: self._worker.shutdown() return Shutdown() def _run_model_session_process(", "int]] scale: List[Tuple[str, float]] class ModelSessionProcess(IRPCModelSession): def __init__(self, model_zip: bytes,", "import MPServer from tiktorch.server.reader import eval_model_zip from .backend import base", "return Shutdown() def _run_model_session_process( conn: Connection, model_zip: bytes, devices: List[str],", "= eval_model_zip(model_file, devices) self._datasets = {} self._worker = base.SessionBackend(self._model) def", "name=\"ModelSessionProcess\", kwargs={\"conn\": server_conn, \"devices\": devices, \"log_queue\": log_queue, \"model_zip\": model_zip}, )", "if log_queue: log.configure(log_queue) session_proc = ModelSessionProcess(model_zip, devices) srv = MPServer(session_proc,", "offset: List[Tuple[str, int]] scale: List[Tuple[str, float]] class ModelSessionProcess(IRPCModelSession): def __init__(self,", "multiprocessing.connection import Connection from typing import List, Optional, Tuple import", "Connection from typing import List, Optional, Tuple import numpy from", "numpy from tiktorch import log from tiktorch.rpc import Shutdown from", ".backend import base from .rpc_interface import IRPCModelSession @dataclasses.dataclass class ModelInfo:", "conn: Connection, model_zip: bytes, devices: List[str], log_queue: Optional[_mp.Queue] = None", "def start_model_session_process( model_zip: bytes, devices: List[str], log_queue: Optional[_mp.Queue] = None", "stddev): id_ = uuid.uuid4().hex self._datasets[id_] = {\"mean\": mean, \"stddev\": stddev}", "devices) self._datasets = {} self._worker = base.SessionBackend(self._model) def forward(self, input_tensor:", "= ModelSessionProcess(model_zip, devices) srv = MPServer(session_proc, conn) srv.listen() def start_model_session_process(", "name: str input_axes: str output_axes: str valid_shapes: List[List[Tuple[str, int]]] halo:", "bytes, devices: List[str]) -> None: with zipfile.ZipFile(io.BytesIO(model_zip)) as model_file: self._model", "self._worker.forward(input_tensor) return res def create_dataset(self, mean, stddev): id_ = uuid.uuid4().hex", "try: # from: https://github.com/pytorch/pytorch/issues/973#issuecomment-346405667 import resource rlimit = resource.getrlimit(resource.RLIMIT_NOFILE) resource.setrlimit(resource.RLIMIT_NOFILE,", "return res def create_dataset(self, mean, stddev): id_ = uuid.uuid4().hex self._datasets[id_]", "shutdown(self) -> Shutdown: self._worker.shutdown() return Shutdown() def _run_model_session_process( conn: Connection,", "base from .rpc_interface import IRPCModelSession @dataclasses.dataclass class ModelInfo: # TODO:", "from tiktorch.rpc import mp as _mp_rpc from tiktorch.rpc.mp import MPServer", "import Connection from typing import List, Optional, Tuple import numpy", "int]] offset: List[Tuple[str, int]] scale: List[Tuple[str, float]] class ModelSessionProcess(IRPCModelSession): def", 
"eval_model_zip(model_file, devices) self._datasets = {} self._worker = base.SessionBackend(self._model) def forward(self,", "(4096, rlimit[1])) except ModuleNotFoundError: pass # probably running on windows", "resource rlimit = resource.getrlimit(resource.RLIMIT_NOFILE) resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1])) except ModuleNotFoundError: pass", "kwargs={\"conn\": server_conn, \"devices\": devices, \"log_queue\": log_queue, \"model_zip\": model_zip}, ) proc.start()", "target=_run_model_session_process, name=\"ModelSessionProcess\", kwargs={\"conn\": server_conn, \"devices\": devices, \"log_queue\": log_queue, \"model_zip\": model_zip},", "import dataclasses import io import multiprocessing as _mp import uuid", "from: https://github.com/pytorch/pytorch/issues/973#issuecomment-346405667 import resource rlimit = resource.getrlimit(resource.RLIMIT_NOFILE) resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))", "return ModelInfo( self._model.name, self._model.input_axes, self._model.output_axes, valid_shapes=[self._model.input_shape], halo=self._model.halo, scale=self._model.scale, offset=self._model.offset, )", "running on windows if log_queue: log.configure(log_queue) session_proc = ModelSessionProcess(model_zip, devices)", "from concurrent.futures import Future from multiprocessing.connection import Connection from typing", "server_conn = _mp.Pipe() proc = _mp.Process( target=_run_model_session_process, name=\"ModelSessionProcess\", kwargs={\"conn\": server_conn,", "typing import List, Optional, Tuple import numpy from tiktorch import", "= _mp.Process( target=_run_model_session_process, name=\"ModelSessionProcess\", kwargs={\"conn\": server_conn, \"devices\": devices, \"log_queue\": log_queue,", "from tiktorch.server.reader import eval_model_zip from .backend import base from .rpc_interface", "str valid_shapes: List[List[Tuple[str, int]]] halo: List[Tuple[str, int]] offset: List[Tuple[str, int]]", "None: with zipfile.ZipFile(io.BytesIO(model_zip)) as model_file: self._model = eval_model_zip(model_file, devices) self._datasets", "= resource.getrlimit(resource.RLIMIT_NOFILE) resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1])) except ModuleNotFoundError: pass # probably", "self._datasets[id_] = {\"mean\": mean, \"stddev\": stddev} return id_ def get_model_info(self)", "Test for model info name: str input_axes: str output_axes: str", "= None ) -> Tuple[_mp.Process, IRPCModelSession]: client_conn, server_conn = _mp.Pipe()", "= None ): try: # from: https://github.com/pytorch/pytorch/issues/973#issuecomment-346405667 import resource rlimit", "dataclasses import io import multiprocessing as _mp import uuid import", "import zipfile from concurrent.futures import Future from multiprocessing.connection import Connection", "log_queue: Optional[_mp.Queue] = None ) -> Tuple[_mp.Process, IRPCModelSession]: client_conn, server_conn", "devices, \"log_queue\": log_queue, \"model_zip\": model_zip}, ) proc.start() return proc, _mp_rpc.create_client(IRPCModelSession,", "tiktorch.server.reader import eval_model_zip from .backend import base from .rpc_interface import", "-> Shutdown: self._worker.shutdown() return Shutdown() def _run_model_session_process( conn: Connection, model_zip:", "MPServer(session_proc, conn) srv.listen() def start_model_session_process( model_zip: bytes, devices: List[str], log_queue:", "numpy.ndarray) -> Future: res = self._worker.forward(input_tensor) return res def create_dataset(self,", "from .backend import base from .rpc_interface import 
IRPCModelSession @dataclasses.dataclass class", "Optional[_mp.Queue] = None ): try: # from: https://github.com/pytorch/pytorch/issues/973#issuecomment-346405667 import resource", "from tiktorch import log from tiktorch.rpc import Shutdown from tiktorch.rpc", "get_model_info(self) -> ModelInfo: return ModelInfo( self._model.name, self._model.input_axes, self._model.output_axes, valid_shapes=[self._model.input_shape], halo=self._model.halo,", "self._datasets = {} self._worker = base.SessionBackend(self._model) def forward(self, input_tensor: numpy.ndarray)", "Shutdown: self._worker.shutdown() return Shutdown() def _run_model_session_process( conn: Connection, model_zip: bytes,", "rlimit = resource.getrlimit(resource.RLIMIT_NOFILE) resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1])) except ModuleNotFoundError: pass #", "res = self._worker.forward(input_tensor) return res def create_dataset(self, mean, stddev): id_", "Optional, Tuple import numpy from tiktorch import log from tiktorch.rpc", "tiktorch import log from tiktorch.rpc import Shutdown from tiktorch.rpc import", "uuid import zipfile from concurrent.futures import Future from multiprocessing.connection import", "mp as _mp_rpc from tiktorch.rpc.mp import MPServer from tiktorch.server.reader import", "from tiktorch.rpc import Shutdown from tiktorch.rpc import mp as _mp_rpc", "List[str], log_queue: Optional[_mp.Queue] = None ) -> Tuple[_mp.Process, IRPCModelSession]: client_conn,", "= _mp.Pipe() proc = _mp.Process( target=_run_model_session_process, name=\"ModelSessionProcess\", kwargs={\"conn\": server_conn, \"devices\":", "model_zip: bytes, devices: List[str], log_queue: Optional[_mp.Queue] = None ): try:", "None ) -> Tuple[_mp.Process, IRPCModelSession]: client_conn, server_conn = _mp.Pipe() proc", "import numpy from tiktorch import log from tiktorch.rpc import Shutdown", "int]]] halo: List[Tuple[str, int]] offset: List[Tuple[str, int]] scale: List[Tuple[str, float]]", "import eval_model_zip from .backend import base from .rpc_interface import IRPCModelSession", "srv.listen() def start_model_session_process( model_zip: bytes, devices: List[str], log_queue: Optional[_mp.Queue] =", "log_queue: Optional[_mp.Queue] = None ): try: # from: https://github.com/pytorch/pytorch/issues/973#issuecomment-346405667 import", "def get_model_info(self) -> ModelInfo: return ModelInfo( self._model.name, self._model.input_axes, self._model.output_axes, valid_shapes=[self._model.input_shape],", "): try: # from: https://github.com/pytorch/pytorch/issues/973#issuecomment-346405667 import resource rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)", "io import multiprocessing as _mp import uuid import zipfile from", "\"log_queue\": log_queue, \"model_zip\": model_zip}, ) proc.start() return proc, _mp_rpc.create_client(IRPCModelSession, client_conn)", "-> ModelInfo: return ModelInfo( self._model.name, self._model.input_axes, self._model.output_axes, valid_shapes=[self._model.input_shape], halo=self._model.halo, scale=self._model.scale,", "self._model = eval_model_zip(model_file, devices) self._datasets = {} self._worker = base.SessionBackend(self._model)", "resource.getrlimit(resource.RLIMIT_NOFILE) resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1])) except ModuleNotFoundError: pass # probably running", "str output_axes: str valid_shapes: List[List[Tuple[str, int]]] halo: List[Tuple[str, int]] offset:", "def __init__(self, model_zip: bytes, devices: List[str]) -> None: with zipfile.ZipFile(io.BytesIO(model_zip))", "devices: List[str], 
log_queue: Optional[_mp.Queue] = None ) -> Tuple[_mp.Process, IRPCModelSession]:", "import mp as _mp_rpc from tiktorch.rpc.mp import MPServer from tiktorch.server.reader", "List[str], log_queue: Optional[_mp.Queue] = None ): try: # from: https://github.com/pytorch/pytorch/issues/973#issuecomment-346405667", "import log from tiktorch.rpc import Shutdown from tiktorch.rpc import mp", "scale=self._model.scale, offset=self._model.offset, ) def shutdown(self) -> Shutdown: self._worker.shutdown() return Shutdown()", "str input_axes: str output_axes: str valid_shapes: List[List[Tuple[str, int]]] halo: List[Tuple[str," ]
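For orientation, a caller spawns the worker with start_model_session_process() and talks to it through the returned RPC client. The sketch below is a minimal, hedged illustration: the "model.zip" path and the CPU-only device list are assumptions, and the exact return semantics of the multiprocessing RPC proxy are not verified here; in tiktorch the gRPC server, not a script, is the real caller.

# Minimal caller sketch under the assumptions stated above.
if __name__ == "__main__":
    with open("model.zip", "rb") as f:  # hypothetical model zip path
        model_bytes = f.read()

    proc, session = start_model_session_process(model_zip=model_bytes, devices=["cpu"])
    try:
        print(session.get_model_info())
    finally:
        session.shutdown()  # shutdown() on the worker returns Shutdown()
        proc.join()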
[ "new entities Result: - 'Copy->Pasted' entities won't have same AvalonID", "ignore_me = True def launch(self, session, event): created = []", "priority = SyncToAvalonEvent.priority - 1 ignore_me = True def launch(self,", "= event['data']['entities'] for entity in entities: try: entity_id = entity['entityId']", "entity.get('action', None) == 'update' and CUST_ATTR_ID_KEY in entity['keys'] and entity_id", "must be less than SyncToAvalon event ''' priority = SyncToAvalonEvent.priority", "of this event must be less than SyncToAvalon event '''", "id_dict['new'] is not None and id_dict['old'] is None: created.append(id_dict['new']) elif", "): ftrack_entity = session.get( self._get_entity_type(entity), entity_id ) cust_attrs = ftrack_entity[\"custom_attributes\"]", "event['data']['entities'] for entity in entities: try: entity_id = entity['entityId'] if", "= SyncToAvalonEvent.priority - 1 ignore_me = True def launch(self, session,", "== 'update' and CUST_ATTR_ID_KEY in entity['keys'] and entity_id in created", "continue def register(session): '''Register plugin. Called when used as an", "AvalonID as source entity Priority of this event must be", "import BaseEvent from openpype.modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY from openpype.modules.ftrack.event_handlers_server.event_sync_to_avalon import (", "is not None and id_dict['old'] is None: created.append(id_dict['new']) elif (", "= session.get( self._get_entity_type(entity), entity_id ) cust_attrs = ftrack_entity[\"custom_attributes\"] if cust_attrs[CUST_ATTR_ID_KEY]:", "try: entity_id = entity['entityId'] if entity.get('action', None) == 'add': id_dict", "session, event): created = [] entities = event['data']['entities'] for entity", "entities: try: entity_id = entity['entityId'] if entity.get('action', None) == 'add':", "None) == 'add': id_dict = entity['changes']['id'] if id_dict['new'] is not", "entity Priority of this event must be less than SyncToAvalon", "entities won't have same AvalonID as source entity Priority of", "This event removes AvalonId from custom attributes of new entities", "from openpype.modules.ftrack.event_handlers_server.event_sync_to_avalon import ( SyncToAvalonEvent ) class DelAvalonIdFromNew(BaseEvent): ''' This", "== 'add': id_dict = entity['changes']['id'] if id_dict['new'] is not None", "openpype.modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY from openpype.modules.ftrack.event_handlers_server.event_sync_to_avalon import ( SyncToAvalonEvent ) class", "if id_dict['new'] is not None and id_dict['old'] is None: created.append(id_dict['new'])", "= entity['changes']['id'] if id_dict['new'] is not None and id_dict['old'] is", "openpype.modules.ftrack.lib import BaseEvent from openpype.modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY from openpype.modules.ftrack.event_handlers_server.event_sync_to_avalon import", ") class DelAvalonIdFromNew(BaseEvent): ''' This event removes AvalonId from custom", "True def launch(self, session, event): created = [] entities =", "of new entities Result: - 'Copy->Pasted' entities won't have same", "[] entities = event['data']['entities'] for entity in entities: try: entity_id", "same AvalonID as source entity Priority of this event must", "won't have same AvalonID as source entity Priority of this", "cust_attrs[CUST_ATTR_ID_KEY]: cust_attrs[CUST_ATTR_ID_KEY] = \"\" session.commit() except Exception: session.rollback() continue def", "AvalonId from custom attributes of new entities Result: - 'Copy->Pasted'", "cust_attrs = ftrack_entity[\"custom_attributes\"] if 
cust_attrs[CUST_ATTR_ID_KEY]: cust_attrs[CUST_ATTR_ID_KEY] = \"\" session.commit() except", "launch(self, session, event): created = [] entities = event['data']['entities'] for", "import CUST_ATTR_ID_KEY from openpype.modules.ftrack.event_handlers_server.event_sync_to_avalon import ( SyncToAvalonEvent ) class DelAvalonIdFromNew(BaseEvent):", "register(session): '''Register plugin. Called when used as an plugin.''' DelAvalonIdFromNew(session).register()", "''' This event removes AvalonId from custom attributes of new", "def register(session): '''Register plugin. Called when used as an plugin.'''", "if cust_attrs[CUST_ATTR_ID_KEY]: cust_attrs[CUST_ATTR_ID_KEY] = \"\" session.commit() except Exception: session.rollback() continue", "from openpype.modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY from openpype.modules.ftrack.event_handlers_server.event_sync_to_avalon import ( SyncToAvalonEvent )", "this event must be less than SyncToAvalon event ''' priority", "if entity.get('action', None) == 'add': id_dict = entity['changes']['id'] if id_dict['new']", "= \"\" session.commit() except Exception: session.rollback() continue def register(session): '''Register", "SyncToAvalon event ''' priority = SyncToAvalonEvent.priority - 1 ignore_me =", "is None: created.append(id_dict['new']) elif ( entity.get('action', None) == 'update' and", "event ''' priority = SyncToAvalonEvent.priority - 1 ignore_me = True", "event must be less than SyncToAvalon event ''' priority =", "from openpype.modules.ftrack.lib import BaseEvent from openpype.modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY from openpype.modules.ftrack.event_handlers_server.event_sync_to_avalon", "''' priority = SyncToAvalonEvent.priority - 1 ignore_me = True def", "not None and id_dict['old'] is None: created.append(id_dict['new']) elif ( entity.get('action',", "created.append(id_dict['new']) elif ( entity.get('action', None) == 'update' and CUST_ATTR_ID_KEY in", "and entity_id in created ): ftrack_entity = session.get( self._get_entity_type(entity), entity_id", "'Copy->Pasted' entities won't have same AvalonID as source entity Priority", "be less than SyncToAvalon event ''' priority = SyncToAvalonEvent.priority -", "CUST_ATTR_ID_KEY in entity['keys'] and entity_id in created ): ftrack_entity =", "in entity['keys'] and entity_id in created ): ftrack_entity = session.get(", "class DelAvalonIdFromNew(BaseEvent): ''' This event removes AvalonId from custom attributes", "Exception: session.rollback() continue def register(session): '''Register plugin. Called when used", "entity_id in created ): ftrack_entity = session.get( self._get_entity_type(entity), entity_id )", "self._get_entity_type(entity), entity_id ) cust_attrs = ftrack_entity[\"custom_attributes\"] if cust_attrs[CUST_ATTR_ID_KEY]: cust_attrs[CUST_ATTR_ID_KEY] =", "= ftrack_entity[\"custom_attributes\"] if cust_attrs[CUST_ATTR_ID_KEY]: cust_attrs[CUST_ATTR_ID_KEY] = \"\" session.commit() except Exception:", "'update' and CUST_ATTR_ID_KEY in entity['keys'] and entity_id in created ):", "have same AvalonID as source entity Priority of this event", "None and id_dict['old'] is None: created.append(id_dict['new']) elif ( entity.get('action', None)", "entity_id ) cust_attrs = ftrack_entity[\"custom_attributes\"] if cust_attrs[CUST_ATTR_ID_KEY]: cust_attrs[CUST_ATTR_ID_KEY] = \"\"", "Result: - 'Copy->Pasted' entities won't have same AvalonID as source", "session.commit() except Exception: session.rollback() continue def register(session): '''Register plugin. 
Called", "entity in entities: try: entity_id = entity['entityId'] if entity.get('action', None)", "openpype.modules.ftrack.event_handlers_server.event_sync_to_avalon import ( SyncToAvalonEvent ) class DelAvalonIdFromNew(BaseEvent): ''' This event", "None) == 'update' and CUST_ATTR_ID_KEY in entity['keys'] and entity_id in", "DelAvalonIdFromNew(BaseEvent): ''' This event removes AvalonId from custom attributes of", "entities Result: - 'Copy->Pasted' entities won't have same AvalonID as", "for entity in entities: try: entity_id = entity['entityId'] if entity.get('action',", "and CUST_ATTR_ID_KEY in entity['keys'] and entity_id in created ): ftrack_entity", "event): created = [] entities = event['data']['entities'] for entity in", "- 1 ignore_me = True def launch(self, session, event): created", "- 'Copy->Pasted' entities won't have same AvalonID as source entity", "and id_dict['old'] is None: created.append(id_dict['new']) elif ( entity.get('action', None) ==", "custom attributes of new entities Result: - 'Copy->Pasted' entities won't", "( SyncToAvalonEvent ) class DelAvalonIdFromNew(BaseEvent): ''' This event removes AvalonId", "event removes AvalonId from custom attributes of new entities Result:", "= entity['entityId'] if entity.get('action', None) == 'add': id_dict = entity['changes']['id']", "cust_attrs[CUST_ATTR_ID_KEY] = \"\" session.commit() except Exception: session.rollback() continue def register(session):", "ftrack_entity = session.get( self._get_entity_type(entity), entity_id ) cust_attrs = ftrack_entity[\"custom_attributes\"] if", "\"\" session.commit() except Exception: session.rollback() continue def register(session): '''Register plugin.", "session.get( self._get_entity_type(entity), entity_id ) cust_attrs = ftrack_entity[\"custom_attributes\"] if cust_attrs[CUST_ATTR_ID_KEY]: cust_attrs[CUST_ATTR_ID_KEY]", "less than SyncToAvalon event ''' priority = SyncToAvalonEvent.priority - 1", "entity_id = entity['entityId'] if entity.get('action', None) == 'add': id_dict =", "SyncToAvalonEvent ) class DelAvalonIdFromNew(BaseEvent): ''' This event removes AvalonId from", "SyncToAvalonEvent.priority - 1 ignore_me = True def launch(self, session, event):", "( entity.get('action', None) == 'update' and CUST_ATTR_ID_KEY in entity['keys'] and", "CUST_ATTR_ID_KEY from openpype.modules.ftrack.event_handlers_server.event_sync_to_avalon import ( SyncToAvalonEvent ) class DelAvalonIdFromNew(BaseEvent): '''", "entity['changes']['id'] if id_dict['new'] is not None and id_dict['old'] is None:", "source entity Priority of this event must be less than", "in entities: try: entity_id = entity['entityId'] if entity.get('action', None) ==", "id_dict = entity['changes']['id'] if id_dict['new'] is not None and id_dict['old']", "Priority of this event must be less than SyncToAvalon event", "entities = event['data']['entities'] for entity in entities: try: entity_id =", "in created ): ftrack_entity = session.get( self._get_entity_type(entity), entity_id ) cust_attrs", "= [] entities = event['data']['entities'] for entity in entities: try:", "'add': id_dict = entity['changes']['id'] if id_dict['new'] is not None and", "import ( SyncToAvalonEvent ) class DelAvalonIdFromNew(BaseEvent): ''' This event removes", "1 ignore_me = True def launch(self, session, event): created =", "created ): ftrack_entity = session.get( self._get_entity_type(entity), entity_id ) cust_attrs =", ") cust_attrs = ftrack_entity[\"custom_attributes\"] if cust_attrs[CUST_ATTR_ID_KEY]: cust_attrs[CUST_ATTR_ID_KEY] = \"\" session.commit()", 
"session.rollback() continue def register(session): '''Register plugin. Called when used as", "except Exception: session.rollback() continue def register(session): '''Register plugin. Called when", "as source entity Priority of this event must be less", "entity['entityId'] if entity.get('action', None) == 'add': id_dict = entity['changes']['id'] if", "entity.get('action', None) == 'add': id_dict = entity['changes']['id'] if id_dict['new'] is", "def launch(self, session, event): created = [] entities = event['data']['entities']", "than SyncToAvalon event ''' priority = SyncToAvalonEvent.priority - 1 ignore_me", "elif ( entity.get('action', None) == 'update' and CUST_ATTR_ID_KEY in entity['keys']", "id_dict['old'] is None: created.append(id_dict['new']) elif ( entity.get('action', None) == 'update'", "entity['keys'] and entity_id in created ): ftrack_entity = session.get( self._get_entity_type(entity),", "attributes of new entities Result: - 'Copy->Pasted' entities won't have", "from custom attributes of new entities Result: - 'Copy->Pasted' entities", "created = [] entities = event['data']['entities'] for entity in entities:", "= True def launch(self, session, event): created = [] entities", "removes AvalonId from custom attributes of new entities Result: -", "None: created.append(id_dict['new']) elif ( entity.get('action', None) == 'update' and CUST_ATTR_ID_KEY", "BaseEvent from openpype.modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY from openpype.modules.ftrack.event_handlers_server.event_sync_to_avalon import ( SyncToAvalonEvent", "ftrack_entity[\"custom_attributes\"] if cust_attrs[CUST_ATTR_ID_KEY]: cust_attrs[CUST_ATTR_ID_KEY] = \"\" session.commit() except Exception: session.rollback()" ]
[ "= workflow_IngestAcceptedSubmission( settings_mock, FakeLogger(), None, None, None, None ) def", "settings_mock, FakeLogger(), None, None, None, None ) def test_init(self): self.assertEqual(self.workflow.name,", "workflow_IngestAcceptedSubmission( settings_mock, FakeLogger(), None, None, None, None ) def test_init(self):", "settings_mock from tests.activity.classes_mock import FakeLogger from workflow.workflow_IngestAcceptedSubmission import workflow_IngestAcceptedSubmission class", "from workflow.workflow_IngestAcceptedSubmission import workflow_IngestAcceptedSubmission class TestWorkflowIngestAcceptedSubmission(unittest.TestCase): def setUp(self): self.workflow =", "from tests.activity.classes_mock import FakeLogger from workflow.workflow_IngestAcceptedSubmission import workflow_IngestAcceptedSubmission class TestWorkflowIngestAcceptedSubmission(unittest.TestCase):", "self.workflow = workflow_IngestAcceptedSubmission( settings_mock, FakeLogger(), None, None, None, None )", "import FakeLogger from workflow.workflow_IngestAcceptedSubmission import workflow_IngestAcceptedSubmission class TestWorkflowIngestAcceptedSubmission(unittest.TestCase): def setUp(self):", "tests.activity.classes_mock import FakeLogger from workflow.workflow_IngestAcceptedSubmission import workflow_IngestAcceptedSubmission class TestWorkflowIngestAcceptedSubmission(unittest.TestCase): def", "FakeLogger(), None, None, None, None ) def test_init(self): self.assertEqual(self.workflow.name, \"IngestAcceptedSubmission\")", "as settings_mock from tests.activity.classes_mock import FakeLogger from workflow.workflow_IngestAcceptedSubmission import workflow_IngestAcceptedSubmission", "workflow_IngestAcceptedSubmission class TestWorkflowIngestAcceptedSubmission(unittest.TestCase): def setUp(self): self.workflow = workflow_IngestAcceptedSubmission( settings_mock, FakeLogger(),", "class TestWorkflowIngestAcceptedSubmission(unittest.TestCase): def setUp(self): self.workflow = workflow_IngestAcceptedSubmission( settings_mock, FakeLogger(), None,", "TestWorkflowIngestAcceptedSubmission(unittest.TestCase): def setUp(self): self.workflow = workflow_IngestAcceptedSubmission( settings_mock, FakeLogger(), None, None,", "unittest import tests.settings_mock as settings_mock from tests.activity.classes_mock import FakeLogger from", "import unittest import tests.settings_mock as settings_mock from tests.activity.classes_mock import FakeLogger", "setUp(self): self.workflow = workflow_IngestAcceptedSubmission( settings_mock, FakeLogger(), None, None, None, None", "tests.settings_mock as settings_mock from tests.activity.classes_mock import FakeLogger from workflow.workflow_IngestAcceptedSubmission import", "FakeLogger from workflow.workflow_IngestAcceptedSubmission import workflow_IngestAcceptedSubmission class TestWorkflowIngestAcceptedSubmission(unittest.TestCase): def setUp(self): self.workflow", "def setUp(self): self.workflow = workflow_IngestAcceptedSubmission( settings_mock, FakeLogger(), None, None, None,", "import tests.settings_mock as settings_mock from tests.activity.classes_mock import FakeLogger from workflow.workflow_IngestAcceptedSubmission", "workflow.workflow_IngestAcceptedSubmission import workflow_IngestAcceptedSubmission class TestWorkflowIngestAcceptedSubmission(unittest.TestCase): def setUp(self): self.workflow = workflow_IngestAcceptedSubmission(", "import workflow_IngestAcceptedSubmission class TestWorkflowIngestAcceptedSubmission(unittest.TestCase): def setUp(self): self.workflow = 
workflow_IngestAcceptedSubmission( settings_mock," ]
[ "the setup magic. api = vumi_api() token_data = api.token_manager.get(token) if", "here, but it's saner to get a whole # vumi_api", "need to be if not user_id or request.user.id == user_id:", "user_id or request.user.id == user_id: path, _, qs = redirect_to.partition('?')", "== user_id: path, _, qs = redirect_to.partition('?') params = urlparse.parse_qs(qs)", "verify the system token. params.update({'token': '%s-%s%s' % (len(token), token, system_token)})", "import reverse from django.contrib.auth.decorators import login_required from vumi.utils import load_class_by_string", "reverse('token', kwargs={'token': token}), }))) @login_required def token_task(request): api = request.user_api.api", "callback_kwargs = params['callback_kwargs'] return_to = params['return_to'] message = params['message'] message_level", "logged in with a wrong account. if request.user.is_authenticated(): logout(request) messages.info(request,", "reverse from django.contrib.auth.decorators import login_required from vumi.utils import load_class_by_string from", "# vumi_api and not worry about all the setup magic.", "we prepend the size of the user_token # to the", "about all the setup magic. api = vumi_api() token_data =", "token): # We only need the redis manager here, but", "then we need authentication and the user's either not #", "we need to be if not user_id or request.user.id ==", "# If we got here then we need authentication and", "= params['message'] message_level = params['message_level'] callback = load_class_by_string(callback_name) callback(*callback_args, **callback_kwargs)", "(len(token), token, system_token)}) return redirect('%s?%s' % (path, urlencode(params))) # If", "can lookup the token and verify the system token. params.update({'token':", "to # where we need to be if not user_id", "raise Http404 user_id = int(token_data['user_id']) redirect_to = token_data['redirect_to'] system_token =", "the size of the user_token # to the token being", "params['callback_name'] callback_args = params['callback_args'] callback_kwargs = params['callback_kwargs'] return_to = params['return_to']", "either not # logged in or is logged in with", "load_class_by_string from go.base.utils import vumi_api def token(request, token): # We", "from django.core.urlresolvers import reverse from django.contrib.auth.decorators import login_required from vumi.utils", "the token and verify the system token. params.update({'token': '%s-%s%s' %", "token being forwarded so the view handling the `redirect_to` #", "token}), }))) @login_required def token_task(request): api = request.user_api.api token =", "params['callback_kwargs'] return_to = params['return_to'] message = params['message'] message_level = params['message_level']", "api.token_manager.verify_get(token) if not token_data: raise Http404 params = token_data['extra_params'] callback_name", "all the setup magic. 
api = vumi_api() token_data = api.token_manager.get(token)", "and we're the same user_id then redirect to # where", "logout from django.contrib import messages from django.core.urlresolvers import reverse from", "django.core.urlresolvers import reverse from django.contrib.auth.decorators import login_required from vumi.utils import", "system_token = token_data['system_token'] # If we're authorized and we're the", "= token_data['system_token'] # If we're authorized and we're the same", "then redirect to # where we need to be if", "urlencode({ 'next': reverse('token', kwargs={'token': token}), }))) @login_required def token_task(request): api", "token_data['extra_params'] callback_name = params['callback_name'] callback_args = params['callback_args'] callback_kwargs = params['callback_kwargs']", "the view handling the `redirect_to` # can lookup the token", "= redirect_to.partition('?') params = urlparse.parse_qs(qs) # since the token can", "we got here then we need authentication and the user's", "params.update({'token': '%s-%s%s' % (len(token), token, system_token)}) return redirect('%s?%s' % (path,", "need authentication and the user's either not # logged in", "where we need to be if not user_id or request.user.id", "We only need the redis manager here, but it's saner", "= int(token_data['user_id']) redirect_to = token_data['redirect_to'] system_token = token_data['system_token'] # If", "token(request, token): # We only need the redis manager here,", "import Http404, redirect from django.contrib.auth.views import logout from django.contrib import", "import urlparse from django.shortcuts import Http404, redirect from django.contrib.auth.views import", "we need authentication and the user's either not # logged", "callback_args = params['callback_args'] callback_kwargs = params['callback_kwargs'] return_to = params['return_to'] message", "a wrong account. if request.user.is_authenticated(): logout(request) messages.info(request, 'Wrong account for", "# can lookup the token and verify the system token.", "lookup the token and verify the system token. params.update({'token': '%s-%s%s'", "% (reverse('auth_login'), urlencode({ 'next': reverse('token', kwargs={'token': token}), }))) @login_required def", "messages from django.core.urlresolvers import reverse from django.contrib.auth.decorators import login_required from", "api.token_manager.get(token) if not token_data: raise Http404 user_id = int(token_data['user_id']) redirect_to", "token_data: raise Http404 user_id = int(token_data['user_id']) redirect_to = token_data['redirect_to'] system_token", "user_id then redirect to # where we need to be", "_, qs = redirect_to.partition('?') params = urlparse.parse_qs(qs) # since the", "raise Http404 params = token_data['extra_params'] callback_name = params['callback_name'] callback_args =", "with a wrong account. 
if request.user.is_authenticated(): logout(request) messages.info(request, 'Wrong account", "qs = redirect_to.partition('?') params = urlparse.parse_qs(qs) # since the token", "be if not user_id or request.user.id == user_id: path, _,", "django.contrib.auth.decorators import login_required from vumi.utils import load_class_by_string from go.base.utils import", "= token_data['redirect_to'] system_token = token_data['system_token'] # If we're authorized and", "django.shortcuts import Http404, redirect from django.contrib.auth.views import logout from django.contrib", "import logout from django.contrib import messages from django.core.urlresolvers import reverse", "# We only need the redis manager here, but it's", "same user_id then redirect to # where we need to", "return redirect('%s?%s' % (path, urlencode(params))) # If we got here", "authentication and the user's either not # logged in or", "from django.contrib import messages from django.core.urlresolvers import reverse from django.contrib.auth.decorators", "not # logged in or is logged in with a", "to be if not user_id or request.user.id == user_id: path,", "the token can be custom we prepend the size of", "vumi_api def token(request, token): # We only need the redis", "got here then we need authentication and the user's either", "params['message'] message_level = params['message_level'] callback = load_class_by_string(callback_name) callback(*callback_args, **callback_kwargs) messages.add_message(request,", "request.user_api.api token = request.GET.get('token') token_data = api.token_manager.verify_get(token) if not token_data:", "params['message_level'] callback = load_class_by_string(callback_name) callback(*callback_args, **callback_kwargs) messages.add_message(request, message_level, message) return", "import messages from django.core.urlresolvers import reverse from django.contrib.auth.decorators import login_required", "magic. api = vumi_api() token_data = api.token_manager.get(token) if not token_data:", "params['return_to'] message = params['message'] message_level = params['message_level'] callback = load_class_by_string(callback_name)", "to the token being forwarded so the view handling the", "urlparse.parse_qs(qs) # since the token can be custom we prepend", "in with a wrong account. 
if request.user.is_authenticated(): logout(request) messages.info(request, 'Wrong", "token = request.GET.get('token') token_data = api.token_manager.verify_get(token) if not token_data: raise", "redirect to # where we need to be if not", "path, _, qs = redirect_to.partition('?') params = urlparse.parse_qs(qs) # since", "urlparse from django.shortcuts import Http404, redirect from django.contrib.auth.views import logout", "'%s-%s%s' % (len(token), token, system_token)}) return redirect('%s?%s' % (path, urlencode(params)))", "the redis manager here, but it's saner to get a", "from go.base.utils import vumi_api def token(request, token): # We only", "only need the redis manager here, but it's saner to", "def token_task(request): api = request.user_api.api token = request.GET.get('token') token_data =", "def token(request, token): # We only need the redis manager", "to get a whole # vumi_api and not worry about", "import urlencode import urlparse from django.shortcuts import Http404, redirect from", "a whole # vumi_api and not worry about all the", "if not token_data: raise Http404 user_id = int(token_data['user_id']) redirect_to =", "from urllib import urlencode import urlparse from django.shortcuts import Http404,", "redirect_to = token_data['redirect_to'] system_token = token_data['system_token'] # If we're authorized", "= params['return_to'] message = params['message'] message_level = params['message_level'] callback =", "the `redirect_to` # can lookup the token and verify the", "handling the `redirect_to` # can lookup the token and verify", "@login_required def token_task(request): api = request.user_api.api token = request.GET.get('token') token_data", "= api.token_manager.get(token) if not token_data: raise Http404 user_id = int(token_data['user_id'])", "we're the same user_id then redirect to # where we", "need the redis manager here, but it's saner to get", "and the user's either not # logged in or is", "logged in or is logged in with a wrong account.", "system_token)}) return redirect('%s?%s' % (path, urlencode(params))) # If we got", "the user's either not # logged in or is logged", "manager here, but it's saner to get a whole #", "= api.token_manager.verify_get(token) if not token_data: raise Http404 params = token_data['extra_params']", "import vumi_api def token(request, token): # We only need the", "import load_class_by_string from go.base.utils import vumi_api def token(request, token): #", "If we're authorized and we're the same user_id then redirect", "forwarded so the view handling the `redirect_to` # can lookup", "account for this token.') return redirect('%s?%s' % (reverse('auth_login'), urlencode({ 'next':", "here then we need authentication and the user's either not", "= params['callback_args'] callback_kwargs = params['callback_kwargs'] return_to = params['return_to'] message =", "the same user_id then redirect to # where we need", "wrong account. if request.user.is_authenticated(): logout(request) messages.info(request, 'Wrong account for this", "token, system_token)}) return redirect('%s?%s' % (path, urlencode(params))) # If we", "redirect('%s?%s' % (path, urlencode(params))) # If we got here then", "params['callback_args'] callback_kwargs = params['callback_kwargs'] return_to = params['return_to'] message = params['message']", "token.') return redirect('%s?%s' % (reverse('auth_login'), urlencode({ 'next': reverse('token', kwargs={'token': token}),", "in or is logged in with a wrong account. 
if", "the token being forwarded so the view handling the `redirect_to`", "= params['message_level'] callback = load_class_by_string(callback_name) callback(*callback_args, **callback_kwargs) messages.add_message(request, message_level, message)", "import login_required from vumi.utils import load_class_by_string from go.base.utils import vumi_api", "= request.GET.get('token') token_data = api.token_manager.verify_get(token) if not token_data: raise Http404", "return_to = params['return_to'] message = params['message'] message_level = params['message_level'] callback", "= vumi_api() token_data = api.token_manager.get(token) if not token_data: raise Http404", "If we got here then we need authentication and the", "setup magic. api = vumi_api() token_data = api.token_manager.get(token) if not", "# to the token being forwarded so the view handling", "# If we're authorized and we're the same user_id then", "api = request.user_api.api token = request.GET.get('token') token_data = api.token_manager.verify_get(token) if", "token can be custom we prepend the size of the", "it's saner to get a whole # vumi_api and not", "token_data = api.token_manager.get(token) if not token_data: raise Http404 user_id =", "if request.user.is_authenticated(): logout(request) messages.info(request, 'Wrong account for this token.') return", "login_required from vumi.utils import load_class_by_string from go.base.utils import vumi_api def", "from django.shortcuts import Http404, redirect from django.contrib.auth.views import logout from", "urlencode import urlparse from django.shortcuts import Http404, redirect from django.contrib.auth.views", "is logged in with a wrong account. if request.user.is_authenticated(): logout(request)", "(path, urlencode(params))) # If we got here then we need", "whole # vumi_api and not worry about all the setup", "token. params.update({'token': '%s-%s%s' % (len(token), token, system_token)}) return redirect('%s?%s' %", "not user_id or request.user.id == user_id: path, _, qs =", "request.user.is_authenticated(): logout(request) messages.info(request, 'Wrong account for this token.') return redirect('%s?%s'", "api = vumi_api() token_data = api.token_manager.get(token) if not token_data: raise", "# since the token can be custom we prepend the", "get a whole # vumi_api and not worry about all", "size of the user_token # to the token being forwarded", "the user_token # to the token being forwarded so the", "}))) @login_required def token_task(request): api = request.user_api.api token = request.GET.get('token')", "we're authorized and we're the same user_id then redirect to", "= token_data['extra_params'] callback_name = params['callback_name'] callback_args = params['callback_args'] callback_kwargs =", "this token.') return redirect('%s?%s' % (reverse('auth_login'), urlencode({ 'next': reverse('token', kwargs={'token':", "account. 
if request.user.is_authenticated(): logout(request) messages.info(request, 'Wrong account for this token.')", "params = urlparse.parse_qs(qs) # since the token can be custom", "redirect_to.partition('?') params = urlparse.parse_qs(qs) # since the token can be", "token_data = api.token_manager.verify_get(token) if not token_data: raise Http404 params =", "Http404, redirect from django.contrib.auth.views import logout from django.contrib import messages", "token_task(request): api = request.user_api.api token = request.GET.get('token') token_data = api.token_manager.verify_get(token)", "urllib import urlencode import urlparse from django.shortcuts import Http404, redirect", "Http404 user_id = int(token_data['user_id']) redirect_to = token_data['redirect_to'] system_token = token_data['system_token']", "# where we need to be if not user_id or", "not token_data: raise Http404 params = token_data['extra_params'] callback_name = params['callback_name']", "message_level = params['message_level'] callback = load_class_by_string(callback_name) callback(*callback_args, **callback_kwargs) messages.add_message(request, message_level,", "user_id: path, _, qs = redirect_to.partition('?') params = urlparse.parse_qs(qs) #", "view handling the `redirect_to` # can lookup the token and", "vumi_api() token_data = api.token_manager.get(token) if not token_data: raise Http404 user_id", "token and verify the system token. params.update({'token': '%s-%s%s' % (len(token),", "not worry about all the setup magic. api = vumi_api()", "or request.user.id == user_id: path, _, qs = redirect_to.partition('?') params", "callback_name = params['callback_name'] callback_args = params['callback_args'] callback_kwargs = params['callback_kwargs'] return_to", "and verify the system token. params.update({'token': '%s-%s%s' % (len(token), token,", "user_token # to the token being forwarded so the view", "`redirect_to` # can lookup the token and verify the system", "urlencode(params))) # If we got here then we need authentication", "= request.user_api.api token = request.GET.get('token') token_data = api.token_manager.verify_get(token) if not", "= params['callback_name'] callback_args = params['callback_args'] callback_kwargs = params['callback_kwargs'] return_to =", "from django.contrib.auth.decorators import login_required from vumi.utils import load_class_by_string from go.base.utils", "prepend the size of the user_token # to the token", "'next': reverse('token', kwargs={'token': token}), }))) @login_required def token_task(request): api =", "int(token_data['user_id']) redirect_to = token_data['redirect_to'] system_token = token_data['system_token'] # If we're", "system token. params.update({'token': '%s-%s%s' % (len(token), token, system_token)}) return redirect('%s?%s'", "kwargs={'token': token}), }))) @login_required def token_task(request): api = request.user_api.api token", "if not user_id or request.user.id == user_id: path, _, qs", "or is logged in with a wrong account. if request.user.is_authenticated():", "redirect('%s?%s' % (reverse('auth_login'), urlencode({ 'next': reverse('token', kwargs={'token': token}), }))) @login_required", "the system token. 
params.update({'token': '%s-%s%s' % (len(token), token, system_token)}) return", "saner to get a whole # vumi_api and not worry", "custom we prepend the size of the user_token # to", "= urlparse.parse_qs(qs) # since the token can be custom we", "since the token can be custom we prepend the size", "so the view handling the `redirect_to` # can lookup the", "of the user_token # to the token being forwarded so", "being forwarded so the view handling the `redirect_to` # can", "token_data['redirect_to'] system_token = token_data['system_token'] # If we're authorized and we're", "can be custom we prepend the size of the user_token", "user's either not # logged in or is logged in", "user_id = int(token_data['user_id']) redirect_to = token_data['redirect_to'] system_token = token_data['system_token'] #", "if not token_data: raise Http404 params = token_data['extra_params'] callback_name =", "# logged in or is logged in with a wrong", "message = params['message'] message_level = params['message_level'] callback = load_class_by_string(callback_name) callback(*callback_args,", "django.contrib import messages from django.core.urlresolvers import reverse from django.contrib.auth.decorators import", "redirect from django.contrib.auth.views import logout from django.contrib import messages from", "redis manager here, but it's saner to get a whole", "messages.info(request, 'Wrong account for this token.') return redirect('%s?%s' % (reverse('auth_login'),", "token_data: raise Http404 params = token_data['extra_params'] callback_name = params['callback_name'] callback_args", "callback = load_class_by_string(callback_name) callback(*callback_args, **callback_kwargs) messages.add_message(request, message_level, message) return redirect(return_to)", "logout(request) messages.info(request, 'Wrong account for this token.') return redirect('%s?%s' %", "'Wrong account for this token.') return redirect('%s?%s' % (reverse('auth_login'), urlencode({", "params = token_data['extra_params'] callback_name = params['callback_name'] callback_args = params['callback_args'] callback_kwargs", "authorized and we're the same user_id then redirect to #", "from vumi.utils import load_class_by_string from go.base.utils import vumi_api def token(request,", "Http404 params = token_data['extra_params'] callback_name = params['callback_name'] callback_args = params['callback_args']", "for this token.') return redirect('%s?%s' % (reverse('auth_login'), urlencode({ 'next': reverse('token',", "request.user.id == user_id: path, _, qs = redirect_to.partition('?') params =", "(reverse('auth_login'), urlencode({ 'next': reverse('token', kwargs={'token': token}), }))) @login_required def token_task(request):", "= params['callback_kwargs'] return_to = params['return_to'] message = params['message'] message_level =", "vumi_api and not worry about all the setup magic. api", "request.GET.get('token') token_data = api.token_manager.verify_get(token) if not token_data: raise Http404 params", "go.base.utils import vumi_api def token(request, token): # We only need", "not token_data: raise Http404 user_id = int(token_data['user_id']) redirect_to = token_data['redirect_to']", "be custom we prepend the size of the user_token #", "% (len(token), token, system_token)}) return redirect('%s?%s' % (path, urlencode(params))) #", "and not worry about all the setup magic. 
api =", "from django.contrib.auth.views import logout from django.contrib import messages from django.core.urlresolvers", "token_data['system_token'] # If we're authorized and we're the same user_id", "but it's saner to get a whole # vumi_api and", "worry about all the setup magic. api = vumi_api() token_data", "vumi.utils import load_class_by_string from go.base.utils import vumi_api def token(request, token):", "% (path, urlencode(params))) # If we got here then we", "django.contrib.auth.views import logout from django.contrib import messages from django.core.urlresolvers import", "return redirect('%s?%s' % (reverse('auth_login'), urlencode({ 'next': reverse('token', kwargs={'token': token}), })))" ]
[ "return wrapper register.filter('amp', make_safe(amp)) register.filter('caps', make_safe(caps)) register.filter('initial_quotes', make_safe(initial_quotes)) register.filter('smartypants', make_safe(smartypants))", "out = f(text) except TypogrifyError, e: if settings.DEBUG: raise e", "django.utils.encoding import force_unicode register = template.Library() def make_safe(f): \"\"\" A", "text = force_unicode(text) f.is_safe = True out = text try:", "def make_safe(f): \"\"\" A function wrapper to make typogrify play", "e return text return mark_safe(out) wrapper.is_safe = True return wrapper", "make typogrify play nice with django's unicode support. \"\"\" @wraps(f)", "out = text try: out = f(text) except TypogrifyError, e:", "register.filter('amp', make_safe(amp)) register.filter('caps', make_safe(caps)) register.filter('initial_quotes', make_safe(initial_quotes)) register.filter('smartypants', make_safe(smartypants)) register.filter('titlecase', make_safe(titlecase))", "register = template.Library() def make_safe(f): \"\"\" A function wrapper to", "functools import wraps from django.conf import settings from django import", "amp, caps, initial_quotes, smartypants, titlecase, typogrify, widont, TypogrifyError from functools", "= True out = text try: out = f(text) except", "register.filter('caps', make_safe(caps)) register.filter('initial_quotes', make_safe(initial_quotes)) register.filter('smartypants', make_safe(smartypants)) register.filter('titlecase', make_safe(titlecase)) register.filter('typogrify', make_safe(typogrify))", "= template.Library() def make_safe(f): \"\"\" A function wrapper to make", "= force_unicode(text) f.is_safe = True out = text try: out", "support. \"\"\" @wraps(f) def wrapper(text): text = force_unicode(text) f.is_safe =", "template from django.utils.safestring import mark_safe from django.utils.encoding import force_unicode register", "= text try: out = f(text) except TypogrifyError, e: if", "TypogrifyError, e: if settings.DEBUG: raise e return text return mark_safe(out)", "text return mark_safe(out) wrapper.is_safe = True return wrapper register.filter('amp', make_safe(amp))", "register.filter('initial_quotes', make_safe(initial_quotes)) register.filter('smartypants', make_safe(smartypants)) register.filter('titlecase', make_safe(titlecase)) register.filter('typogrify', make_safe(typogrify)) register.filter('widont', make_safe(widont))", "if settings.DEBUG: raise e return text return mark_safe(out) wrapper.is_safe =", "f(text) except TypogrifyError, e: if settings.DEBUG: raise e return text", "titlecase, typogrify, widont, TypogrifyError from functools import wraps from django.conf", "from django.conf import settings from django import template from django.utils.safestring", "import template from django.utils.safestring import mark_safe from django.utils.encoding import force_unicode", "raise e return text return mark_safe(out) wrapper.is_safe = True return", "return text return mark_safe(out) wrapper.is_safe = True return wrapper register.filter('amp',", "from typogrify.filters import amp, caps, initial_quotes, smartypants, titlecase, typogrify, widont,", "f.is_safe = True out = text try: out = f(text)", "A function wrapper to make typogrify play nice with django's", "True return wrapper register.filter('amp', make_safe(amp)) register.filter('caps', make_safe(caps)) register.filter('initial_quotes', make_safe(initial_quotes)) register.filter('smartypants',", "widont, TypogrifyError from functools import wraps from django.conf import settings", "= f(text) except 
TypogrifyError, e: if settings.DEBUG: raise e return", "= True return wrapper register.filter('amp', make_safe(amp)) register.filter('caps', make_safe(caps)) register.filter('initial_quotes', make_safe(initial_quotes))", "typogrify play nice with django's unicode support. \"\"\" @wraps(f) def", "mark_safe(out) wrapper.is_safe = True return wrapper register.filter('amp', make_safe(amp)) register.filter('caps', make_safe(caps))", "from django.utils.encoding import force_unicode register = template.Library() def make_safe(f): \"\"\"", "TypogrifyError from functools import wraps from django.conf import settings from", "@wraps(f) def wrapper(text): text = force_unicode(text) f.is_safe = True out", "unicode support. \"\"\" @wraps(f) def wrapper(text): text = force_unicode(text) f.is_safe", "nice with django's unicode support. \"\"\" @wraps(f) def wrapper(text): text", "wrapper(text): text = force_unicode(text) f.is_safe = True out = text", "function wrapper to make typogrify play nice with django's unicode", "import force_unicode register = template.Library() def make_safe(f): \"\"\" A function", "template.Library() def make_safe(f): \"\"\" A function wrapper to make typogrify", "django.utils.safestring import mark_safe from django.utils.encoding import force_unicode register = template.Library()", "make_safe(amp)) register.filter('caps', make_safe(caps)) register.filter('initial_quotes', make_safe(initial_quotes)) register.filter('smartypants', make_safe(smartypants)) register.filter('titlecase', make_safe(titlecase)) register.filter('typogrify',", "\"\"\" @wraps(f) def wrapper(text): text = force_unicode(text) f.is_safe = True", "make_safe(f): \"\"\" A function wrapper to make typogrify play nice", "make_safe(caps)) register.filter('initial_quotes', make_safe(initial_quotes)) register.filter('smartypants', make_safe(smartypants)) register.filter('titlecase', make_safe(titlecase)) register.filter('typogrify', make_safe(typogrify)) register.filter('widont',", "mark_safe from django.utils.encoding import force_unicode register = template.Library() def make_safe(f):", "wrapper to make typogrify play nice with django's unicode support.", "import mark_safe from django.utils.encoding import force_unicode register = template.Library() def", "settings from django import template from django.utils.safestring import mark_safe from", "from django import template from django.utils.safestring import mark_safe from django.utils.encoding", "try: out = f(text) except TypogrifyError, e: if settings.DEBUG: raise", "def wrapper(text): text = force_unicode(text) f.is_safe = True out =", "e: if settings.DEBUG: raise e return text return mark_safe(out) wrapper.is_safe", "wraps from django.conf import settings from django import template from", "typogrify, widont, TypogrifyError from functools import wraps from django.conf import", "django.conf import settings from django import template from django.utils.safestring import", "initial_quotes, smartypants, titlecase, typogrify, widont, TypogrifyError from functools import wraps", "settings.DEBUG: raise e return text return mark_safe(out) wrapper.is_safe = True", "import amp, caps, initial_quotes, smartypants, titlecase, typogrify, widont, TypogrifyError from", "play nice with django's unicode support. \"\"\" @wraps(f) def wrapper(text):", "to make typogrify play nice with django's unicode support. \"\"\"", "with django's unicode support. 
\"\"\" @wraps(f) def wrapper(text): text =", "django import template from django.utils.safestring import mark_safe from django.utils.encoding import", "wrapper register.filter('amp', make_safe(amp)) register.filter('caps', make_safe(caps)) register.filter('initial_quotes', make_safe(initial_quotes)) register.filter('smartypants', make_safe(smartypants)) register.filter('titlecase',", "wrapper.is_safe = True return wrapper register.filter('amp', make_safe(amp)) register.filter('caps', make_safe(caps)) register.filter('initial_quotes',", "from functools import wraps from django.conf import settings from django", "smartypants, titlecase, typogrify, widont, TypogrifyError from functools import wraps from", "django's unicode support. \"\"\" @wraps(f) def wrapper(text): text = force_unicode(text)", "force_unicode(text) f.is_safe = True out = text try: out =", "import settings from django import template from django.utils.safestring import mark_safe", "except TypogrifyError, e: if settings.DEBUG: raise e return text return", "import wraps from django.conf import settings from django import template", "caps, initial_quotes, smartypants, titlecase, typogrify, widont, TypogrifyError from functools import", "\"\"\" A function wrapper to make typogrify play nice with", "force_unicode register = template.Library() def make_safe(f): \"\"\" A function wrapper", "typogrify.filters import amp, caps, initial_quotes, smartypants, titlecase, typogrify, widont, TypogrifyError", "text try: out = f(text) except TypogrifyError, e: if settings.DEBUG:", "from django.utils.safestring import mark_safe from django.utils.encoding import force_unicode register =", "True out = text try: out = f(text) except TypogrifyError,", "return mark_safe(out) wrapper.is_safe = True return wrapper register.filter('amp', make_safe(amp)) register.filter('caps'," ]
[ "binary data: int (4 bytes) data, = struct.unpack('<i', f.read(4)) header[\"PosInfosVerified\"]", "data # First slice center Z coordinate data, = struct.unpack('<f',", "are stored as a series of bytes. See the V16", "Then follows eventually a section listing spatial # transformations which", "in # the context of advanced segmentation processing. Compared to", "= data if header[\"NrOfPastSpatialTransformations\"] != 0: # NOTE(Developer Guide 2.6):", "Expected binary data: int (4 bytes) data = header[\"PastTransformation\"][i][\"Type\"] f.write(struct.pack('<i',", "are followed by # scan position information from the original", "= read_variable_length_string(f) header[\"PastTransformation\"][i][\"Name\"] = data # Expected binary data: int", "header[\"ReferenceSpaceVMR\"] f.write(struct.pack('<B', data)) # Expected binary data: float (4 bytes)", "# Flip BV axes # --------------------------------------------------------------------- # VMR Post-Data Header", "routines as well as for proper visualization. # Expected binary", "struct.unpack('<f', f.read(4)) header[\"FoVRows\"] = data # Field of view extent", "as f: # --------------------------------------------------------------------- # VMR Pre-Data Header # ---------------------------------------------------------------------", "space # BV (Y top -> bottom) [axis 1 after", "data)) # Expected binary data: float (4 bytes) data =", "body+scale (3 translation, 3 rotation, 3 scale) # \"2\": Affine", "the data set will be internally \"expanded\" # for certain", "series of bytes. See the V16 format # for a", "as a series of bytes. See the V16 format #", "f.write(struct.pack('<f', data)) data = header[\"VoxelSizeY\"] f.write(struct.pack('<f', data)) data = header[\"VoxelSizeZ\"]", "\"1\", file versions 2 and higher contain # additional header", "Slice thickness [mm] data, = struct.unpack('<f', f.read(4)) header[\"GapThickness\"] = data", "entries are # identical to file version \"2\". # Expected", "BV axes) header[\"PastTransformation\"] = [] for i in range(header[\"NrOfPastSpatialTransformations\"]): header[\"PastTransformation\"].append(dict())", "Expected binary data: float (4 bytes) f.write(struct.pack('<f', trans_values[j])) # Expected", "data # 16-bit data max intensity return header, data_img #", "to file. Returns ------- header : dictionary Pre-data and post-data", "np.reshape] = Y in Tal space # BV (Y top", "Un-Talairach transformation (1 - 5 -> BV axes) header[\"PastTransformation\"] =", "second, more extensive, header. The current # version of VMR", "0: # NOTE(Developer Guide 2.6): For each past transformation, the", "--------------------------------------------------------------------- # Expected binary data: unsigned short int (2 bytes)", "int (4 bytes) data = header[\"PosInfosVerified\"] f.write(struct.pack('<i', data)) data =", "!= 0: for i in range(header[\"NrOfPastSpatialTransformations\"]): # Expected binary data:", "Z coordinate data, = struct.unpack('<f', f.read(4)) header[\"RowDirX\"] = data #", "format 2 in order to represent large data sets efficiently,", "data)) data = header[\"VoxelSizeZ\"] f.write(struct.pack('<f', data)) # Expected binary data:", "range(header[\"PastTransformation\"][i][\"NrOfValues\"]): # Expected binary data: float (4 bytes) f.write(struct.pack('<f', trans_values[j]))", "\"\"\"Read, write, create Brainvoyager VMR file format.\"\"\" import struct import", "= trans_values # Expected binary data: char (1 byte) data,", "internally \"expanded\" # for certain operations. 
The axes labels are", "with two bytes (short # integers). The VMR format contains", "terminology follows the internal BrainVoyager (BV) format. # The mapping", "version of VMR files is \"4\", which is only slightly", "np from bvbabel.utils import (read_variable_length_string, write_variable_length_string) # ============================================================================= def read_vmr(filename):", "string data = header[\"PastTransformation\"][i][\"Name\"] write_variable_length_string(f, data) # Expected binary data:", "= struct.unpack('<B', f.read(1)) header[\"VoxelResolutionInTALmm\"] = data # Expected binary data:", "struct.unpack('<f', f.read(4)) header[\"Slice1CenterZ\"] = data # First slice center Z", "component data, = struct.unpack('<f', f.read(4)) header[\"ColDirZ\"] = data # Slice", "import struct import numpy as np from bvbabel.utils import (read_variable_length_string,", "higher contain # additional header information after the actual data", "byte. The data is organized in three loops: # DimZ", "trans_values = [] for j in range(header[\"PastTransformation\"][i][\"NrOfValues\"]): # Expected binary", "VMR Post-Data Header # --------------------------------------------------------------------- # NOTE(Developer Guide 2.6): The", "data # Slice row direction vector Z component data, =", "(4 bytes) data, = struct.unpack('<i', f.read(4)) header[\"NRows\"] = data #", "header[\"VMROrigV16MeanValue\"] f.write(struct.pack('<i', data)) data = header[\"VMROrigV16MaxValue\"] f.write(struct.pack('<i', data)) return print(\"VMR", "--------------------------------------------------------------------- # NOTE(Developer Guide 2.6): VMR files contain anatomical 3D", "= data # Last slice center Z coordinate data, =", "# --------------------------------------------------------------------- # VMR Pre-Data Header # --------------------------------------------------------------------- # NOTE(Developer", "from Nifti standard back to BV standard data_img = data_img[::-1,", "for custom routines, but is # important in BrainVoyager QX", "offset values to # format 2 in order to represent", "= struct.unpack('<i', f.read(4)) header[\"VMROrigV16MinValue\"] = data # 16-bit data min", "voxel resolution. if header[\"File version\"] >= 3: # NOTE(Developer Guide", "Header of VMR file. data_img : numpy.array, 3D Image. \"\"\"", "image matrix # Expected binary data: float (4 bytes) data,", "data : 3D numpy.array Image data. \"\"\" header = dict()", "bytes) data = header[\"OffsetX\"] f.write(struct.pack('<h', data)) data = header[\"OffsetY\"] f.write(struct.pack('<h',", "data)) # --------------------------------------------------------------------- # VMR Data # --------------------------------------------------------------------- # Convert", "# --------------------------------------------------------------------- # Convert axes from Nifti standard back to", "resolution along X axis data, = struct.unpack('<f', f.read(4)) header[\"VoxelSizeY\"] =", "data = header[\"Slice1CenterZ\"] f.write(struct.pack('<f', data)) data = header[\"SliceNCenterX\"] f.write(struct.pack('<f', data))", "data # Last slice center Z coordinate data, = struct.unpack('<f',", "followed by a second, more extensive, header. 
The current #", "f.read(2)) header[\"OffsetX\"] = data data, = struct.unpack('<h', f.read(2)) header[\"OffsetY\"] =", "many # subsequent values define the transformation: # \"1\": Rigid", "followed by # scan position information from the original file", "0: for i in range(header[\"NrOfPastSpatialTransformations\"]): # Expected binary data: variable-length", "Header # --------------------------------------------------------------------- # NOTE(Developer Guide 2.6): VMR files contain", "information from the original file headers, e.g. from # DICOM", "direction [mm] data, = struct.unpack('<f', f.read(4)) header[\"FoVCols\"] = data #", "# original file version \"1\", file versions 2 and higher", "(2 bytes) data = header[\"OffsetX\"] f.write(struct.pack('<h', data)) data = header[\"OffsetY\"]", "struct.unpack('<f', f.read(4)) header[\"ColDirY\"] = data # Slice column direction vector", "values for # each dimension as well as a value", "np.reshape] = Z in Tal space # BV (Z left", "header[\"PastTransformation\"][i][\"Type\"] f.write(struct.pack('<i', data)) # Expected binary data: variable-length string data", "Y component data, = struct.unpack('<f', f.read(4)) header[\"RowDirZ\"] = data #", "(3 translation, 3 rotation, 3 scale) # \"2\": Affine transformation", "data)) data = header[\"OffsetY\"] f.write(struct.pack('<h', data)) data = header[\"OffsetZ\"] f.write(struct.pack('<h',", "the V16 format # for a version storing each intensity", "f.read(4)) header[\"Slice1CenterX\"] = data # First slice center X coordinate", "a list trans_values = [] for j in range(header[\"PastTransformation\"][i][\"NrOfValues\"]): #", "f.write(struct.pack('<f', data)) data = header[\"GapThickness\"] f.write(struct.pack('<f', data)) # Expected binary", "= data # Field of view extent in row direction", "= header[\"FoVRows\"] f.write(struct.pack('<f', data)) data = header[\"FoVCols\"] f.write(struct.pack('<f', data)) data", "the DICOM # standard. Then follows eventually a section listing", "version \"2\". # Expected binary data: short int (2 bytes)", "data = header[\"ReferenceSpaceVMR\"] f.write(struct.pack('<B', data)) # Expected binary data: float", "how many # subsequent values define the transformation: # \"1\":", "= data # Expected binary data: variable-length string data =", "since file version \"3\" and contain offset values for #", "actual data (\"post-data # header\"). This allows to read VMR", "including the assumed # left-right convention, the reference space (e.g.", "value with two bytes (short # integers). The VMR format", "sets, # typically containing the whole brain (head) of subjects.", "--------------------------------------------------------------------- # Convert axes from Nifti standard back to BV", "struct.unpack('<B', f.read(1)) header[\"VoxelResolutionVerified\"] = data data, = struct.unpack('<B', f.read(1)) header[\"VoxelResolutionInTALmm\"]", "transformation and # coregistration routines as well as for proper", "a second, more extensive, header. 
The current # version of", "The coordinate axes labels in these entries are not in", "# --------------------------------------------------------------------- # NOTE(Developer Guide 2.6): VMR files contain anatomical", "[mm] data, = struct.unpack('<f', f.read(4)) header[\"GapThickness\"] = data # Gap", "Z axis # Expected binary data: char (1 byte) data,", "struct.unpack('<f', f.read(4)) trans_values.append(data) header[\"PastTransformation\"][i][\"Values\"] = trans_values # Expected binary data:", "new in v4 # Expected binary data: float (4 bytes)", "read_vmr(filename): \"\"\"Read Brainvoyager VMR file. Parameters ---------- filename : string", "information (if available) # and stores a series of spatial", "= header[\"VoxelSizeX\"] f.write(struct.pack('<f', data)) data = header[\"VoxelSizeY\"] f.write(struct.pack('<f', data)) data", "= data # 16-bit data min intensity data, = struct.unpack('<i',", "(1 byte) data, = struct.unpack('<B', f.read(1)) header[\"VoxelResolutionVerified\"] = data data,", "# Voxel resolution along Z axis # Expected binary data:", "center Y coordinate data, = struct.unpack('<f', f.read(4)) header[\"Slice1CenterZ\"] = data", "mapping to Talairach axes is as follows: # BV (X", "in range(data_img.size): f.write(struct.pack('<B', data_img[i])) # --------------------------------------------------------------------- # VMR Post-Data Header", "bytes) data = header[\"Slice1CenterX\"] f.write(struct.pack('<f', data)) data = header[\"Slice1CenterY\"] f.write(struct.pack('<f',", "in range(data_img.size): data_img[i], = struct.unpack('<B', f.read(1)) data_img = np.reshape( data_img,", "for a version storing each intensity value with two bytes", "f.read(4)) header[\"SliceNCenterZ\"] = data # Last slice center Z coordinate", "entries are followed by # scan position information from the", "header[\"OffsetX\"] = data data, = struct.unpack('<h', f.read(2)) header[\"OffsetY\"] = data", "# Last slice center X coordinate data, = struct.unpack('<f', f.read(4))", "data)) data = header[\"SliceThickness\"] f.write(struct.pack('<f', data)) data = header[\"GapThickness\"] f.write(struct.pack('<f',", "# NOTE(Developer Guide 2.6): For each past transformation, the #", "# \"1\": Rigid body+scale (3 translation, 3 rotation, 3 scale)", "small header followed by the # actual data followed by", "int (4 bytes) data, = struct.unpack('<i', f.read(4)) header[\"PosInfosVerified\"] = data", "= header[\"VMROrigV16MeanValue\"] f.write(struct.pack('<i', data)) data = header[\"VMROrigV16MaxValue\"] f.write(struct.pack('<i', data)) return", "header[\"FramingCubeDim\"] f.write(struct.pack('<h', data)) # Expected binary data: int (4 bytes)", "(4 bytes) data = header[\"Slice1CenterX\"] f.write(struct.pack('<f', data)) data = header[\"Slice1CenterY\"]", "slice center X coordinate data, = struct.unpack('<f', f.read(4)) header[\"Slice1CenterY\"] =", "data, = struct.unpack('<i', f.read(4)) header[\"PosInfosVerified\"] = data data, = struct.unpack('<i',", "in # file version \"3\" with BrainVoyager QX 1.7. 
All", "version \"3\" and contain offset values for # each dimension", "the post-data header contains position information (if available) # and", "Z in Tal space # BV (Z left -> right)", "data # First slice center X coordinate data, = struct.unpack('<f',", "data)) data = header[\"DimZ\"] f.write(struct.pack('<H', data)) # --------------------------------------------------------------------- # VMR", "header[\"SliceNCenterY\"] = data # Last slice center Y coordinate data,", "section listing spatial # transformations which have been eventually performed", "# in the post-data header contains position information (if available)", "int (2 bytes) data, = struct.unpack('<H', f.read(2)) header[\"File version\"] =", "binary data: variable-length string data = header[\"PastTransformation\"][i][\"Name\"] write_variable_length_string(f, data) #", "(4 bytes) data = header[\"VoxelSizeX\"] f.write(struct.pack('<f', data)) data = header[\"VoxelSizeY\"]", "2, 1)) # BV to Tal # Expected binary data:", "------- header : dictionary Pre-data and post-data headers. data :", "--------------------------------------------------------------------- # NOTE(Developer Guide 2.6): The first four entries of", "struct.unpack('<f', f.read(4)) header[\"SliceThickness\"] = data # Slice thickness [mm] data,", "Each data element (intensity value) is # represented in 1", "transformation: # \"1\": Rigid body+scale (3 translation, 3 rotation, 3", "data, = struct.unpack('<f', f.read(4)) header[\"Slice1CenterY\"] = data # First slice", "of bytes. See the V16 format # for a version", "header[\"ColDirX\"] f.write(struct.pack('<f', data)) data = header[\"ColDirY\"] f.write(struct.pack('<f', data)) data =", "data, = struct.unpack('<i', f.read(4)) header[\"PastTransformation\"][i][\"NrOfValues\"] = data # Store transformation", "= header[\"PastTransformation\"][i][\"Name\"] write_variable_length_string(f, data) # Expected binary data: int (4", "f.write(struct.pack('<i', data)) if header[\"NrOfPastSpatialTransformations\"] != 0: for i in range(header[\"NrOfPastSpatialTransformations\"]):", "f.read(1)) header[\"VoxelResolutionVerified\"] = data data, = struct.unpack('<B', f.read(1)) header[\"VoxelResolutionInTALmm\"] =", "which have been eventually performed to create the # current", "# Expected binary data: variable-length string data = header[\"PastTransformation\"][i][\"SourceFileName\"] write_variable_length_string(f,", "--------------------------------------------------------------------- if header[\"File version\"] >= 3: # Expected binary data:", "original data set (\"history record\"). The # post-header data can", "in three loops: # DimZ # DimY # DimX #", "read VMR data sets with minimal header # checking if", "header, data_img # ============================================================================= def write_vmr(filename, header, data_img): \"\"\"Protocol to", "struct.unpack('<f', f.read(4)) header[\"SliceNCenterY\"] = data # Last slice center Y", "short int (2 bytes) data = header[\"File version\"] f.write(struct.pack('<H', data))", "data, = struct.unpack('<f', f.read(4)) header[\"FoVRows\"] = data # Field of", "-> right) [axis 0 after np.reshape] = X in Tal", "int (2 bytes) data = header[\"OffsetX\"] f.write(struct.pack('<h', data)) data =", "values, 4x4 matrix) # \"4\": Talairach transformation # \"5\": Un-Talairach", "data = header[\"FramingCubeDim\"] f.write(struct.pack('<h', data)) # Expected binary data: int", "# file version \"3\" with BrainVoyager QX 1.7. 
All other", "bytes) data = header[\"File version\"] f.write(struct.pack('<H', data)) data = header[\"DimX\"]", "to # format 2 in order to represent large data", "axes labels in these entries are not in # terms", "f.read(2)) header[\"OffsetZ\"] = data data, = struct.unpack('<h', f.read(2)) header[\"FramingCubeDim\"] =", "data_img.flatten() for i in range(data_img.size): f.write(struct.pack('<B', data_img[i])) # --------------------------------------------------------------------- #", "= header[\"ColDirZ\"] f.write(struct.pack('<f', data)) # Expected binary data: int (4", "header[\"ColDirY\"] = data # Slice column direction vector Y component", "element (intensity value) is # represented in 1 byte. The", "transformation (16 values, 4x4 matrix) # \"4\": Talairach transformation #", "(read_variable_length_string, write_variable_length_string) # ============================================================================= def read_vmr(filename): \"\"\"Read Brainvoyager VMR file.", "# for certain operations. The axes labels are in terms", "# standard. Then follows eventually a section listing spatial #", "= data # Expected binary data: int (4 bytes) data,", "f.write(struct.pack('<i', data)) # Expected binary data: variable-length string data =", "f.read(1)) header[\"VoxelResolutionInTALmm\"] = data # Expected binary data: int (4", "binary data: char (1 byte) data, = struct.unpack('<B', f.read(1)) header[\"LeftRightConvention\"]", "int (4 bytes) data, = struct.unpack('<i', f.read(4)) header[\"PastTransformation\"][i][\"NrOfValues\"] = data", "Talairach axes is as follows: # BV (X front ->", "been added in # file version \"3\" with BrainVoyager QX", "header[\"RowDirY\"] = data # Slice row direction vector Y component", "First slice center X coordinate data, = struct.unpack('<f', f.read(4)) header[\"Slice1CenterY\"]", "= data_img[::-1, ::-1, ::-1] # Flip BV axes data_img =", "thickness [mm] # Expected binary data: int (4 bytes) data,", "f.read(4)) header[\"PosInfosVerified\"] = data data, = struct.unpack('<i', f.read(4)) header[\"CoordinateSystem\"] =", "view extent in column dir. [mm] data, = struct.unpack('<f', f.read(4))", "data, = struct.unpack('<H', f.read(2)) header[\"DimZ\"] = data # --------------------------------------------------------------------- #", "Parameters ---------- filename : string Output filename. header : dictionary", "Data # --------------------------------------------------------------------- # Convert axes from Nifti standard back", "vector X component data, = struct.unpack('<f', f.read(4)) header[\"ColDirY\"] = data", "indicating the size of a cube with # iso-dimensions to", "max intensity return header, data_img # ============================================================================= def write_vmr(filename, header,", "in range(header[\"PastTransformation\"][i][\"NrOfValues\"]): # Expected binary data: float (4 bytes) f.write(struct.pack('<f',", "struct import numpy as np from bvbabel.utils import (read_variable_length_string, write_variable_length_string)", "axes) header[\"PastTransformation\"] = [] for i in range(header[\"NrOfPastSpatialTransformations\"]): header[\"PastTransformation\"].append(dict()) #", "= header[\"VoxelSizeZ\"] f.write(struct.pack('<f', data)) # Expected binary data: char (1", "= data # Nr of rows of slice image matrix", "BV axes data_img = np.transpose(data_img, (0, 2, 1)) # BV", "large data sets efficiently, e.g. 
in # the context of", "bytes) data, = struct.unpack('<f', f.read(4)) header[\"FoVRows\"] = data # Field", "in BrainVoyager QX for spatial transformation and # coregistration routines", "For each past transformation, the # information specified in the", "set (\"history record\"). The # post-header data can be probably", "terms of # BrainVoyager's internal format. These four entries are", "# intensity values are stored as a series of bytes.", "struct.unpack('<H', f.read(2)) header[\"File version\"] = data data, = struct.unpack('<H', f.read(2))", "header[\"PastTransformation\"] = [] for i in range(header[\"NrOfPastSpatialTransformations\"]): header[\"PastTransformation\"].append(dict()) # Expected", "# NOTE(Developer Guide 2.6): The first four entries of the", "= data data, = struct.unpack('<h', f.read(2)) header[\"FramingCubeDim\"] = data #", "with minimal header # checking if the extended information is", "trans_values # Expected binary data: char (1 byte) data, =", "f.read(4)) header[\"FoVCols\"] = data # Field of view extent in", "# Transformation values are stored as a list trans_values =", "data max intensity return header, data_img # ============================================================================= def write_vmr(filename,", "= struct.unpack('<H', f.read(2)) header[\"DimZ\"] = data # --------------------------------------------------------------------- # VMR", "header[\"LeftRightConvention\"] f.write(struct.pack('<B', data)) data = header[\"ReferenceSpaceVMR\"] f.write(struct.pack('<B', data)) # Expected", "f.read(1)) data_img = np.reshape( data_img, (header[\"DimZ\"], header[\"DimY\"], header[\"DimX\"])) data_img =", "# --------------------------------------------------------------------- # VMR Post-Data Header # --------------------------------------------------------------------- if header[\"File", "# Gap thickness [mm] # Expected binary data: int (4", "back) [axis 2 after np.reshape] = Y in Tal space", "the # information specified in the following table is stored.", "byte) data, = struct.unpack('<B', f.read(1)) header[\"VoxelResolutionVerified\"] = data data, =", "binary data: unsigned char (1 byte) data_img = data_img.flatten() for", "struct.unpack('<f', f.read(4)) header[\"SliceNCenterZ\"] = data # Last slice center Z", "Expected binary data: unsigned char (1 byte) data_img = data_img.flatten()", "data = header[\"RowDirZ\"] f.write(struct.pack('<f', data)) data = header[\"ColDirX\"] f.write(struct.pack('<f', data))", "data = header[\"VoxelSizeX\"] f.write(struct.pack('<f', data)) data = header[\"VoxelSizeY\"] f.write(struct.pack('<f', data))", "X axis data, = struct.unpack('<f', f.read(4)) header[\"VoxelSizeY\"] = data #", "data = header[\"DimX\"] f.write(struct.pack('<H', data)) data = header[\"DimY\"] f.write(struct.pack('<H', data))", "Slice row direction vector Z component data, = struct.unpack('<f', f.read(4))", "available) # and stores a series of spatial transformations, which", "# Convert axes from Nifti standard back to BV standard", "# the context of advanced segmentation processing. 
Compared to the", "a section listing spatial # transformations which have been eventually", "# header are new since file version \"3\" and contain", "[] for i in range(header[\"NrOfPastSpatialTransformations\"]): header[\"PastTransformation\"].append(dict()) # Expected binary data:", "struct.unpack('<i', f.read(4)) header[\"CoordinateSystem\"] = data # Expected binary data: float", "coordinate data, = struct.unpack('<f', f.read(4)) header[\"SliceNCenterZ\"] = data # Last", "with open(filename, 'wb') as f: # --------------------------------------------------------------------- # VMR Pre-Data", "= header[\"PastTransformation\"][i][\"Type\"] f.write(struct.pack('<i', data)) # Expected binary data: variable-length string", "data_img = data_img[::-1, ::-1, ::-1] # Flip BV axes data_img", "Expected binary data: int (4 bytes) data = header[\"VMROrigV16MinValue\"] f.write(struct.pack('<i',", "# DimZ # DimY # DimX # # The axes", "= struct.unpack('<f', f.read(4)) header[\"RowDirX\"] = data # Slice row direction", "binary data: int (4 bytes) data, = struct.unpack('<i', f.read(4)) header[\"NrOfPastSpatialTransformations\"]", "binary data: int (4 bytes) data = header[\"PastTransformation\"][i][\"NrOfValues\"] f.write(struct.pack('<i', data))", "(e.g. ACPC transformation). Finally, additional # information further descries the", "= header[\"RowDirZ\"] f.write(struct.pack('<f', data)) data = header[\"ColDirX\"] f.write(struct.pack('<f', data)) data", "= header[\"ColDirY\"] f.write(struct.pack('<f', data)) data = header[\"ColDirZ\"] f.write(struct.pack('<f', data)) #", "= data data, = struct.unpack('<H', f.read(2)) header[\"DimY\"] = data data,", "listing spatial # transformations which have been eventually performed to", "space # Expected binary data: unsigned char (1 byte) data_img", "data, = struct.unpack('<f', f.read(4)) header[\"VoxelSizeY\"] = data # Voxel resolution", "back to BV standard data_img = data_img[::-1, ::-1, ::-1] #", "well as for proper visualization. # Expected binary data: unsigned", "data min intensity data, = struct.unpack('<i', f.read(4)) header[\"VMROrigV16MeanValue\"] = data", "def write_vmr(filename, header, data_img): \"\"\"Protocol to write Brainvoyager VMR file.", "header[\"Slice1CenterY\"] f.write(struct.pack('<f', data)) data = header[\"Slice1CenterZ\"] f.write(struct.pack('<f', data)) data =", "data, = struct.unpack('<f', f.read(4)) header[\"SliceThickness\"] = data # Slice thickness", "data: int (4 bytes) data, = struct.unpack('<i', f.read(4)) header[\"VMROrigV16MinValue\"] =", "= header[\"SliceNCenterX\"] f.write(struct.pack('<f', data)) data = header[\"SliceNCenterY\"] f.write(struct.pack('<f', data)) data", "slice center X coordinate data, = struct.unpack('<f', f.read(4)) header[\"SliceNCenterY\"] =", "= header[\"SliceNCenterZ\"] f.write(struct.pack('<f', data)) data = header[\"RowDirX\"] f.write(struct.pack('<f', data)) data", "stored as a list trans_values = header[\"PastTransformation\"][i][\"Values\"] for j in", "top -> bottom) [axis 1 after np.reshape] = Z in", "component data, = struct.unpack('<f', f.read(4)) header[\"RowDirZ\"] = data # Slice", "= struct.unpack('<B', f.read(1)) data_img = np.reshape( data_img, (header[\"DimZ\"], header[\"DimY\"], header[\"DimX\"]))", "= header[\"CoordinateSystem\"] f.write(struct.pack('<i', data)) # Expected binary data: float (4", "= struct.unpack('<i', f.read(4)) header[\"VMROrigV16MeanValue\"] = data # 16-bit data mean", "additional header information after the actual data (\"post-data # header\").", "to file version \"2\". 
# Expected binary data: short int", ": dictionary Header of VMR file. data_img : numpy.array, 3D", "coordinate data, = struct.unpack('<f', f.read(4)) header[\"RowDirX\"] = data # Slice", "header[\"VoxelResolutionInTALmm\"] f.write(struct.pack('<B', data)) # Expected binary data: int (4 bytes)", "binary data: float (4 bytes) f.write(struct.pack('<f', trans_values[j])) # Expected binary", "the # current VMR (e.g. ACPC transformation). Finally, additional #", "i in range(data_img.size): f.write(struct.pack('<B', data_img[i])) # --------------------------------------------------------------------- # VMR Post-Data", "binary data: unsigned short int (2 bytes) data, = struct.unpack('<H',", "struct.unpack('<f', f.read(4)) header[\"RowDirX\"] = data # Slice row direction vector", "binary data: float (4 bytes) data, = struct.unpack('<f', f.read(4)) header[\"VoxelSizeX\"]", "and # coregistration routines as well as for proper visualization.", "1 after np.reshape] = Z in Tal space # BV", "bytes) data, = struct.unpack('<H', f.read(2)) header[\"File version\"] = data data,", "= data # Voxel resolution along Y axis data, =", "f.write(struct.pack('<B', data)) data = header[\"ReferenceSpaceVMR\"] f.write(struct.pack('<B', data)) # Expected binary", "# Voxel resolution along X axis data, = struct.unpack('<f', f.read(4))", "dictionary Pre-data and post-data headers. data : 3D numpy.array Image", "VMR Data # --------------------------------------------------------------------- # NOTE(Developer Guide 2.6): Each data", "data)) data = header[\"DimY\"] f.write(struct.pack('<H', data)) data = header[\"DimZ\"] f.write(struct.pack('<H',", "internal conventions but follow the DICOM # standard. Then follows", "= header[\"File version\"] f.write(struct.pack('<H', data)) data = header[\"DimX\"] f.write(struct.pack('<H', data))", "advanced segmentation processing. Compared to the # original file version", "= struct.unpack('<f', f.read(4)) header[\"SliceNCenterY\"] = data # Last slice center", "= header[\"SliceNCenterY\"] f.write(struct.pack('<f', data)) data = header[\"SliceNCenterZ\"] f.write(struct.pack('<f', data)) data", "each dimension as well as a value indicating the size", "brain (head) of subjects. The # intensity values are stored", "been # performed to the original data set (\"history record\").", "# VMR Pre-Data Header # --------------------------------------------------------------------- # Expected binary data:", "# \"type of transformation\" is a value determining how many", "# each dimension as well as a value indicating the", "and contain offset values for # each dimension as well", "= struct.unpack('<f', f.read(4)) header[\"RowDirY\"] = data # Slice row direction", "data: int (4 bytes) data, = struct.unpack('<i', f.read(4)) header[\"PastTransformation\"][i][\"Type\"] =", "header contains position information (if available) # and stores a", "current VMR (e.g. ACPC transformation). 
Finally, additional # information further", "# First slice center Z coordinate data, = struct.unpack('<f', f.read(4))", "data # Expected binary data: int (4 bytes) data, =", "f.write(struct.pack('<h', data)) data = header[\"OffsetZ\"] f.write(struct.pack('<h', data)) data = header[\"FramingCubeDim\"]", "struct.unpack('<h', f.read(2)) header[\"OffsetZ\"] = data data, = struct.unpack('<h', f.read(2)) header[\"FramingCubeDim\"]", "= np.transpose(data_img, (0, 2, 1)) # BV to Tal #", "is a value determining how many # subsequent values define", "# --------------------------------------------------------------------- # VMR Pre-Data Header # --------------------------------------------------------------------- # Expected", "[axis 0 after np.reshape] = X in Tal space #", "header[\"SliceNCenterX\"] f.write(struct.pack('<f', data)) data = header[\"SliceNCenterY\"] f.write(struct.pack('<f', data)) data =", "struct.unpack('<i', f.read(4)) header[\"PosInfosVerified\"] = data data, = struct.unpack('<i', f.read(4)) header[\"CoordinateSystem\"]", "data: short int (2 bytes) data = header[\"OffsetX\"] f.write(struct.pack('<h', data))", "data)) # Expected binary data: int (4 bytes) data =", "normalization) and voxel resolution. if header[\"File version\"] >= 3: #", "data: char (1 byte) data, = struct.unpack('<B', f.read(1)) header[\"VoxelResolutionVerified\"] =", "char (1 byte) data, = struct.unpack('<B', f.read(1)) header[\"VoxelResolutionVerified\"] = data", "data_img # ============================================================================= def write_vmr(filename, header, data_img): \"\"\"Protocol to write", "VMR Data # --------------------------------------------------------------------- # Convert axes from Nifti standard", "def read_vmr(filename): \"\"\"Read Brainvoyager VMR file. Parameters ---------- filename :", "\"4\": Talairach transformation # \"5\": Un-Talairach transformation (1 - 5", "file. data_img : numpy.array, 3D Image. \"\"\" with open(filename, 'wb')", "\"\"\"Protocol to write Brainvoyager VMR file. Parameters ---------- filename :", "f.read(4)) header[\"Slice1CenterY\"] = data # First slice center Y coordinate", "data # Expected binary data: float (4 bytes) data, =", "data = header[\"SliceNCenterZ\"] f.write(struct.pack('<f', data)) data = header[\"RowDirX\"] f.write(struct.pack('<f', data))", "data = header[\"PastTransformation\"][i][\"Type\"] f.write(struct.pack('<i', data)) # Expected binary data: variable-length", "struct.unpack('<f', f.read(4)) header[\"VoxelSizeX\"] = data # Voxel resolution along X", "data)) # Expected binary data: char (1 byte) data =", "= data # new in v4 # Expected binary data:", "# version 3 (as indicated below). Version 3 added offset", "component data, = struct.unpack('<f', f.read(4)) header[\"ColDirX\"] = data # Slice", "# BV (Y top -> bottom) [axis 1 after np.reshape]", "# identical to file version \"2\". 
# Expected binary data:", "trans_values.append(data) header[\"PastTransformation\"][i][\"Values\"] = trans_values # Expected binary data: char (1", "header[\"DimY\"] = data data, = struct.unpack('<H', f.read(2)) header[\"DimZ\"] = data", "BV standard data_img = data_img[::-1, ::-1, ::-1] # Flip BV", "# modified in v4 data, = struct.unpack('<B', f.read(1)) header[\"ReferenceSpaceVMR\"] =", "data # Store transformation values as a list trans_values =", "header[\"SliceThickness\"] f.write(struct.pack('<f', data)) data = header[\"GapThickness\"] f.write(struct.pack('<f', data)) # Expected", "Expected binary data: int (4 bytes) data, = struct.unpack('<i', f.read(4))", "--------------------------------------------------------------------- # VMR Data # --------------------------------------------------------------------- # NOTE(Developer Guide 2.6):", "header[\"PastTransformation\"][i][\"Values\"] = trans_values # Expected binary data: char (1 byte)", "after the actual data (\"post-data # header\"). This allows to", "= np.reshape( data_img, (header[\"DimZ\"], header[\"DimY\"], header[\"DimX\"])) data_img = np.transpose(data_img, (0,", "'rb') as f: # --------------------------------------------------------------------- # VMR Pre-Data Header #", "header[\"DimX\"] = data data, = struct.unpack('<H', f.read(2)) header[\"DimY\"] = data", "int (4 bytes) data, = struct.unpack('<i', f.read(4)) header[\"PastTransformation\"][i][\"Type\"] = data", "write_vmr(filename, header, data_img): \"\"\"Protocol to write Brainvoyager VMR file. Parameters", "bytes) data = header[\"VoxelSizeX\"] f.write(struct.pack('<f', data)) data = header[\"VoxelSizeY\"] f.write(struct.pack('<f',", "f.write(struct.pack('<f', data)) data = header[\"SliceNCenterX\"] f.write(struct.pack('<f', data)) data = header[\"SliceNCenterY\"]", "# version of VMR files is \"4\", which is only", "reference space (e.g. Talairach after # normalization) and voxel resolution.", "Tal space # BV (Z left -> right) [axis 0", "= struct.unpack('<f', f.read(4)) header[\"FoVRows\"] = data # Field of view", "# NOTE(Developer Guide 2.6): Each data element (intensity value) is", "int (4 bytes) data = header[\"NrOfPastSpatialTransformations\"] f.write(struct.pack('<i', data)) if header[\"NrOfPastSpatialTransformations\"]", "e.g. from # DICOM files. The coordinate axes labels in", "header[\"PastTransformation\"][i][\"NrOfValues\"] f.write(struct.pack('<i', data)) # Transformation values are stored as a", "Y coordinate data, = struct.unpack('<f', f.read(4)) header[\"SliceNCenterZ\"] = data #", "data sets efficiently, e.g. in # the context of advanced", "= header[\"PastTransformation\"][i][\"NrOfValues\"] f.write(struct.pack('<i', data)) # Transformation values are stored as", "center Z coordinate data, = struct.unpack('<f', f.read(4)) header[\"RowDirX\"] = data", "data, = struct.unpack('<h', f.read(2)) header[\"OffsetY\"] = data data, = struct.unpack('<h',", "file. 
Returns ------- header : dictionary Pre-data and post-data headers.", "data)) # Expected binary data: variable-length string data = header[\"PastTransformation\"][i][\"SourceFileName\"]", "# 16-bit data mean intensity data, = struct.unpack('<i', f.read(4)) header[\"VMROrigV16MaxValue\"]", "byte) data_img = data_img.flatten() for i in range(data_img.size): f.write(struct.pack('<B', data_img[i]))", "f.read(4)) header[\"VoxelSizeY\"] = data # Voxel resolution along Y axis", "bytes) data, = struct.unpack('<f', f.read(4)) header[\"Slice1CenterX\"] = data # First", "Expected binary data: short int (2 bytes) data, = struct.unpack('<h',", "transformations, which might have been # performed to the original", "anatomical 3D data sets, # typically containing the whole brain", "data, = struct.unpack('<i', f.read(4)) header[\"NrOfPastSpatialTransformations\"] = data if header[\"NrOfPastSpatialTransformations\"] !=", "= struct.unpack('<f', f.read(4)) header[\"RowDirZ\"] = data # Slice row direction", "data, = struct.unpack('<B', f.read(1)) header[\"ReferenceSpaceVMR\"] = data # new in", "the actual data (\"post-data # header\"). This allows to read", "= struct.unpack('<i', f.read(4)) header[\"PastTransformation\"][i][\"Type\"] = data # Expected binary data:", "QX 1.7. All other entries are # identical to file", "data: variable-length string data = read_variable_length_string(f) header[\"PastTransformation\"][i][\"SourceFileName\"] = data #", "header[\"NRows\"] = data # Nr of rows of slice image", "center Z coordinate data, = struct.unpack('<f', f.read(4)) header[\"SliceNCenterX\"] = data", "data followed by a second, more extensive, header. The current", "'wb') as f: # --------------------------------------------------------------------- # VMR Pre-Data Header #", "header[\"FoVRows\"] = data # Field of view extent in row", "but follow the DICOM # standard. Then follows eventually a", "coordinate data, = struct.unpack('<f', f.read(4)) header[\"Slice1CenterY\"] = data # First", "are in terms of # BrainVoyager's internal format. These four", "# iso-dimensions to which the data set will be internally", "data, = struct.unpack('<f', f.read(4)) header[\"RowDirX\"] = data # Slice row", "bytes) data, = struct.unpack('<i', f.read(4)) header[\"PastTransformation\"][i][\"Type\"] = data # Expected", "[axis 1 after np.reshape] = Z in Tal space #", "binary data: int (4 bytes) data, = struct.unpack('<i', f.read(4)) header[\"PastTransformation\"][i][\"Type\"]", "The # \"type of transformation\" is a value determining how", "space # BV (Z left -> right) [axis 0 after", "spatial transformations, which might have been # performed to the", "further descries the data set, including the assumed # left-right", "BV (X front -> back) [axis 2 after np.reshape] =", "each past transformation, the # information specified in the following", "# Slice row direction vector Y component data, = struct.unpack('<f',", "determining how many # subsequent values define the transformation: #", "header[\"RowDirY\"] f.write(struct.pack('<f', data)) data = header[\"RowDirZ\"] f.write(struct.pack('<f', data)) data =", "= X in Tal space # Expected binary data: unsigned", "# Expected binary data: char (1 byte) data = header[\"LeftRightConvention\"]", "BV (Y top -> bottom) [axis 1 after np.reshape] =", "header[\"OffsetX\"] f.write(struct.pack('<h', data)) data = header[\"OffsetY\"] f.write(struct.pack('<h', data)) data =", "3D numpy.array Image data. 
\"\"\" header = dict() with open(filename,", "header[\"DimZ\"] = data # --------------------------------------------------------------------- # VMR Data # ---------------------------------------------------------------------", "string data = read_variable_length_string(f) header[\"PastTransformation\"][i][\"SourceFileName\"] = data # Expected binary", "16-bit data max intensity return header, data_img # ============================================================================= def", "data (\"post-data # header\"). This allows to read VMR data", "transformation). Finally, additional # information further descries the data set,", "series of spatial transformations, which might have been # performed", "file format.\"\"\" import struct import numpy as np from bvbabel.utils", "# VMR Data # --------------------------------------------------------------------- # Convert axes from Nifti", "read_variable_length_string(f) header[\"PastTransformation\"][i][\"SourceFileName\"] = data # Expected binary data: int (4", "left -> right) [axis 0 after np.reshape] = X in", "= struct.unpack('<i', f.read(4)) header[\"NCols\"] = data # Nr of columns", "3: # Expected binary data: short int (2 bytes) data", "but is # important in BrainVoyager QX for spatial transformation", "to represent large data sets efficiently, e.g. in # the", "specified in the following table is stored. The # \"type", "= data # Slice row direction vector Z component data,", "QX for spatial transformation and # coregistration routines as well", "int (4 bytes) data = header[\"PastTransformation\"][i][\"Type\"] f.write(struct.pack('<i', data)) # Expected", "might have been # performed to the original data set", "extended information is not needed. The information # in the", "header[\"OffsetZ\"] f.write(struct.pack('<h', data)) data = header[\"FramingCubeDim\"] f.write(struct.pack('<h', data)) # Expected", "# Expected binary data: char (1 byte) data = header[\"VoxelResolutionVerified\"]", "header[\"RowDirX\"] = data # Slice row direction vector X component", "to Tal data_img = data_img[::-1, ::-1, ::-1] # Flip BV", "only slightly different from # version 3 (as indicated below).", "center X coordinate data, = struct.unpack('<f', f.read(4)) header[\"Slice1CenterY\"] = data", "rows of slice image matrix data, = struct.unpack('<i', f.read(4)) header[\"NCols\"]", "f.write(struct.pack('<h', data)) # Expected binary data: int (4 bytes) data", "= header[\"RowDirX\"] f.write(struct.pack('<f', data)) data = header[\"RowDirY\"] f.write(struct.pack('<f', data)) data", "int (4 bytes) data, = struct.unpack('<i', f.read(4)) header[\"VMROrigV16MinValue\"] = data", "to Talairach axes is as follows: # BV (X front", "size of a cube with # iso-dimensions to which the", "filename : string Output filename. header : dictionary Header of", "intensity values are stored as a series of bytes. 
See", "Guide 2.6): Each data element (intensity value) is # represented", "# post-header data can be probably ignored for custom routines,", "Expected binary data: short int (2 bytes) data = header[\"OffsetX\"]", "float (4 bytes) data = header[\"Slice1CenterX\"] f.write(struct.pack('<f', data)) data =", "(Z left -> right) [axis 0 after np.reshape] = X", "= struct.unpack('<H', f.read(2)) header[\"DimX\"] = data data, = struct.unpack('<H', f.read(2))", "bytes) data = header[\"NRows\"] f.write(struct.pack('<i', data)) data = header[\"NCols\"] f.write(struct.pack('<i',", "data = header[\"PastTransformation\"][i][\"SourceFileName\"] write_variable_length_string(f, data) # Expected binary data: int", "write, create Brainvoyager VMR file format.\"\"\" import struct import numpy", "header[\"DimZ\"] f.write(struct.pack('<H', data)) # --------------------------------------------------------------------- # VMR Data # ---------------------------------------------------------------------", "coregistration routines as well as for proper visualization. # Expected", "data, = struct.unpack('<B', f.read(1)) header[\"LeftRightConvention\"] = data # modified in", "as for proper visualization. # Expected binary data: unsigned short", "data: int (4 bytes) data, = struct.unpack('<i', f.read(4)) header[\"PosInfosVerified\"] =", "struct.unpack('<i', f.read(4)) header[\"NRows\"] = data # Nr of rows of", "eventually performed to create the # current VMR (e.g. ACPC", "============================================================================= def read_vmr(filename): \"\"\"Read Brainvoyager VMR file. Parameters ---------- filename", "probably ignored for custom routines, but is # important in", "transformation\" is a value determining how many # subsequent values", "the original data set (\"history record\"). The # post-header data", "of BrainVoyager's internal conventions but follow the DICOM # standard.", "Expected binary data: int (4 bytes) data = header[\"NrOfPastSpatialTransformations\"] f.write(struct.pack('<i',", "header[\"PastTransformation\"][i][\"Values\"] for j in range(header[\"PastTransformation\"][i][\"NrOfValues\"]): # Expected binary data: float", "1)) # BV to Tal data_img = data_img[::-1, ::-1, ::-1]", "char (1 byte) data_img = np.zeros((header[\"DimZ\"] * header[\"DimY\"] * header[\"DimX\"]),", "is organized in three loops: # DimZ # DimY #", "header[\"PastTransformation\"][i][\"SourceFileName\"] = data # Expected binary data: int (4 bytes)", "binary data: int (4 bytes) data = header[\"PastTransformation\"][i][\"Type\"] f.write(struct.pack('<i', data))", "data)) data = header[\"VoxelSizeY\"] f.write(struct.pack('<f', data)) data = header[\"VoxelSizeZ\"] f.write(struct.pack('<f',", "headers. data : 3D numpy.array Image data. 
\"\"\" header =", "image matrix data, = struct.unpack('<i', f.read(4)) header[\"NCols\"] = data #", "a value determining how many # subsequent values define the", "data = header[\"DimZ\"] f.write(struct.pack('<H', data)) # --------------------------------------------------------------------- # VMR Data", "(4 bytes) data, = struct.unpack('<i', f.read(4)) header[\"PastTransformation\"][i][\"Type\"] = data #", "by # scan position information from the original file headers,", "= struct.unpack('<i', f.read(4)) header[\"CoordinateSystem\"] = data # Expected binary data:", "f.write(struct.pack('<f', data)) data = header[\"Slice1CenterZ\"] f.write(struct.pack('<f', data)) data = header[\"SliceNCenterX\"]", "# Expected binary data: float (4 bytes) data = header[\"VoxelSizeX\"]", "data)) data = header[\"ReferenceSpaceVMR\"] f.write(struct.pack('<B', data)) # Expected binary data:", "f.write(struct.pack('<i', data)) # Expected binary data: float (4 bytes) data", "binary data: float (4 bytes) data, = struct.unpack('<f', f.read(4)) header[\"FoVRows\"]", "version\"] >= 3: # Expected binary data: short int (2", ": numpy.array, 3D Image. \"\"\" with open(filename, 'wb') as f:", "(4 bytes) data = header[\"NRows\"] f.write(struct.pack('<i', data)) data = header[\"NCols\"]", "= struct.unpack('<f', f.read(4)) header[\"SliceNCenterX\"] = data # Last slice center", "header[\"Slice1CenterX\"] = data # First slice center X coordinate data,", "in terms of # BrainVoyager's internal format. These four entries", "# --------------------------------------------------------------------- # Expected binary data: unsigned short int (2", "added in # file version \"3\" with BrainVoyager QX 1.7.", "additional # information further descries the data set, including the", "i in range(header[\"NrOfPastSpatialTransformations\"]): # Expected binary data: variable-length string data", "data: int (4 bytes) data = header[\"VMROrigV16MinValue\"] f.write(struct.pack('<i', data)) data", "data # Field of view extent in column dir. [mm]", "binary data: float (4 bytes) data, = struct.unpack('<f', f.read(4)) header[\"Slice1CenterX\"]", "= header[\"Slice1CenterX\"] f.write(struct.pack('<f', data)) data = header[\"Slice1CenterY\"] f.write(struct.pack('<f', data)) data", "# Slice column direction vector Z component # Expected binary", "bytes) data = header[\"NrOfPastSpatialTransformations\"] f.write(struct.pack('<i', data)) if header[\"NrOfPastSpatialTransformations\"] != 0:", "space (e.g. Talairach after # normalization) and voxel resolution. if", "data # Slice row direction vector X component data, =", "header : dictionary Pre-data and post-data headers. data : 3D", "= struct.unpack('<H', f.read(2)) header[\"File version\"] = data data, = struct.unpack('<H',", "[] for j in range(header[\"PastTransformation\"][i][\"NrOfValues\"]): # Expected binary data: float", "header[\"VoxelSizeX\"] = data # Voxel resolution along X axis data,", "intensity data, = struct.unpack('<i', f.read(4)) header[\"VMROrigV16MaxValue\"] = data # 16-bit", "is # represented in 1 byte. 
The data is organized", "data, = struct.unpack('<H', f.read(2)) header[\"File version\"] = data data, =", "header[\"SliceNCenterZ\"] f.write(struct.pack('<f', data)) data = header[\"RowDirX\"] f.write(struct.pack('<f', data)) data =", "header # checking if the extended information is not needed.", "binary data: variable-length string data = read_variable_length_string(f) header[\"PastTransformation\"][i][\"SourceFileName\"] = data", "= data # Last slice center Y coordinate data, =", "after np.reshape] = X in Tal space # Expected binary", "data_img : numpy.array, 3D Image. \"\"\" with open(filename, 'wb') as", "Finally, additional # information further descries the data set, including", "return header, data_img # ============================================================================= def write_vmr(filename, header, data_img): \"\"\"Protocol", "Store transformation values as a list trans_values = [] for", "post-data headers. data : 3D numpy.array Image data. \"\"\" header", "# First slice center Y coordinate data, = struct.unpack('<f', f.read(4))", "= data data, = struct.unpack('<B', f.read(1)) header[\"VoxelResolutionInTALmm\"] = data #", "been eventually performed to create the # current VMR (e.g.", "f.read(4)) header[\"PastTransformation\"][i][\"Type\"] = data # Expected binary data: variable-length string", "3 added offset values to # format 2 in order", "header[\"VMROrigV16MaxValue\"] = data # 16-bit data max intensity return header,", "header[\"VoxelSizeZ\"] = data # Voxel resolution along Z axis #", "::-1, ::-1] # Flip BV axes # --------------------------------------------------------------------- # VMR", "rotation, 3 scale) # \"2\": Affine transformation (16 values, 4x4", "\"3\" and contain offset values for # each dimension as", "struct.unpack('<i', f.read(4)) header[\"NrOfPastSpatialTransformations\"] = data if header[\"NrOfPastSpatialTransformations\"] != 0: #", "= header[\"RowDirY\"] f.write(struct.pack('<f', data)) data = header[\"RowDirZ\"] f.write(struct.pack('<f', data)) data", "represent large data sets efficiently, e.g. in # the context", "unsigned short int (2 bytes) data = header[\"File version\"] f.write(struct.pack('<H',", "to the original data set (\"history record\"). 
The # post-header", "data_img = data_img.flatten() for i in range(data_img.size): f.write(struct.pack('<B', data_img[i])) #", "range(header[\"NrOfPastSpatialTransformations\"]): # Expected binary data: variable-length string data = header[\"PastTransformation\"][i][\"Name\"]", "data # Slice row direction vector Y component data, =", "in range(header[\"NrOfPastSpatialTransformations\"]): # Expected binary data: variable-length string data =", "Flip BV axes # --------------------------------------------------------------------- # VMR Post-Data Header #", "= dict() with open(filename, 'rb') as f: # --------------------------------------------------------------------- #", "f.read(4)) header[\"VMROrigV16MeanValue\"] = data # 16-bit data mean intensity data,", "# subsequent values define the transformation: # \"1\": Rigid body+scale", "f.write(struct.pack('<B', data)) # Expected binary data: float (4 bytes) data", "f.read(4)) trans_values.append(data) header[\"PastTransformation\"][i][\"Values\"] = trans_values # Expected binary data: char", "np.reshape] = X in Tal space # Expected binary data:", "data # 16-bit data mean intensity data, = struct.unpack('<i', f.read(4))", "bytes) data, = struct.unpack('<i', f.read(4)) header[\"PosInfosVerified\"] = data data, =", "4x4 matrix) # \"4\": Talairach transformation # \"5\": Un-Talairach transformation", "# current VMR (e.g. ACPC transformation). Finally, additional # information", "1.7. All other entries are # identical to file version", "float (4 bytes) data, = struct.unpack('<f', f.read(4)) header[\"Slice1CenterX\"] = data", "version\"] f.write(struct.pack('<H', data)) data = header[\"DimX\"] f.write(struct.pack('<H', data)) data =", "data # Slice thickness [mm] data, = struct.unpack('<f', f.read(4)) header[\"GapThickness\"]", "data = header[\"OffsetY\"] f.write(struct.pack('<h', data)) data = header[\"OffsetZ\"] f.write(struct.pack('<h', data))", "as a value indicating the size of a cube with", "= struct.unpack('<B', f.read(1)) header[\"ReferenceSpaceVMR\"] = data # new in v4", "Slice row direction vector Y component data, = struct.unpack('<f', f.read(4))", "-> bottom) [axis 1 after np.reshape] = Z in Tal", "(intensity value) is # represented in 1 byte. The data", "f.write(struct.pack('<f', data)) data = header[\"SliceThickness\"] f.write(struct.pack('<f', data)) data = header[\"GapThickness\"]", "binary data: int (4 bytes) data, = struct.unpack('<i', f.read(4)) header[\"NRows\"]", "These four entries are followed by # scan position information", "entries of the post-data # header are new since file", "slice image matrix # Expected binary data: float (4 bytes)", "Expected binary data: variable-length string data = header[\"PastTransformation\"][i][\"SourceFileName\"] write_variable_length_string(f, data)", "binary data: char (1 byte) data = header[\"LeftRightConvention\"] f.write(struct.pack('<B', data))", "Voxel resolution along X axis data, = struct.unpack('<f', f.read(4)) header[\"VoxelSizeY\"]", "header, data_img): \"\"\"Protocol to write Brainvoyager VMR file. 
Parameters ----------", "dict() with open(filename, 'rb') as f: # --------------------------------------------------------------------- # VMR", "numpy as np from bvbabel.utils import (read_variable_length_string, write_variable_length_string) # =============================================================================", "is as follows: # BV (X front -> back) [axis", "# information further descries the data set, including the assumed", "(4 bytes) data, = struct.unpack('<f', f.read(4)) header[\"FoVRows\"] = data #", "of transformation\" is a value determining how many # subsequent", "VMR file. data_img : numpy.array, 3D Image. \"\"\" with open(filename,", "data: unsigned char (1 byte) data_img = np.zeros((header[\"DimZ\"] * header[\"DimY\"]", "Guide 2.6): VMR files contain anatomical 3D data sets, #", "e.g. in # the context of advanced segmentation processing. Compared", "= struct.unpack('<h', f.read(2)) header[\"OffsetY\"] = data data, = struct.unpack('<h', f.read(2))", "data = header[\"SliceNCenterY\"] f.write(struct.pack('<f', data)) data = header[\"SliceNCenterZ\"] f.write(struct.pack('<f', data))", "define the transformation: # \"1\": Rigid body+scale (3 translation, 3", "f.read(2)) header[\"File version\"] = data data, = struct.unpack('<H', f.read(2)) header[\"DimX\"]", "The # intensity values are stored as a series of", "data, = struct.unpack('<f', f.read(4)) header[\"SliceNCenterY\"] = data # Last slice", "(4 bytes) data, = struct.unpack('<i', f.read(4)) header[\"PastTransformation\"][i][\"NrOfValues\"] = data #", "int (4 bytes) data = header[\"PastTransformation\"][i][\"NrOfValues\"] f.write(struct.pack('<i', data)) # Transformation", "f.read(4)) header[\"RowDirZ\"] = data # Slice row direction vector Z", "struct.unpack('<H', f.read(2)) header[\"DimY\"] = data data, = struct.unpack('<H', f.read(2)) header[\"DimZ\"]", "The axes terminology follows the internal BrainVoyager (BV) format. #", "import numpy as np from bvbabel.utils import (read_variable_length_string, write_variable_length_string) #", "[mm] data, = struct.unpack('<f', f.read(4)) header[\"FoVCols\"] = data # Field", "\"type of transformation\" is a value determining how many #", "\"4\", which is only slightly different from # version 3", ": string Output filename. header : dictionary Header of VMR", "coordinate data, = struct.unpack('<f', f.read(4)) header[\"SliceNCenterX\"] = data # Last", "variable-length string data = read_variable_length_string(f) header[\"PastTransformation\"][i][\"SourceFileName\"] = data # Expected", "a series of bytes. See the V16 format # for", "the original file headers, e.g. from # DICOM files. The", "bytes. See the V16 format # for a version storing", "standard back to BV standard data_img = data_img[::-1, ::-1, ::-1]", "struct.unpack('<i', f.read(4)) header[\"NCols\"] = data # Nr of columns of", "f.read(2)) header[\"DimX\"] = data data, = struct.unpack('<H', f.read(2)) header[\"DimY\"] =", "version 3 (as indicated below). Version 3 added offset values", "f.write(struct.pack('<f', data)) # Expected binary data: char (1 byte) data", "(4 bytes) data, = struct.unpack('<i', f.read(4)) header[\"NrOfPastSpatialTransformations\"] = data if", "header[\"NrOfPastSpatialTransformations\"] = data if header[\"NrOfPastSpatialTransformations\"] != 0: # NOTE(Developer Guide", "format. 
These four entries are followed by # scan position", "Expected binary data: unsigned char (1 byte) data_img = np.zeros((header[\"DimZ\"]", "= struct.unpack('<f', f.read(4)) header[\"ColDirZ\"] = data # Slice column direction", "struct.unpack('<h', f.read(2)) header[\"FramingCubeDim\"] = data # Expected binary data: int", "(4 bytes) data = header[\"FoVRows\"] f.write(struct.pack('<f', data)) data = header[\"FoVCols\"]", "Image. \"\"\" with open(filename, 'wb') as f: # --------------------------------------------------------------------- #", "data, = struct.unpack('<f', f.read(4)) header[\"ColDirZ\"] = data # Slice column", "Voxel resolution along Y axis data, = struct.unpack('<f', f.read(4)) header[\"VoxelSizeZ\"]", "f.write(struct.pack('<f', data)) data = header[\"ColDirX\"] f.write(struct.pack('<f', data)) data = header[\"ColDirY\"]", "data) # Expected binary data: int (4 bytes) data =", "values as a list trans_values = [] for j in", "internal format. These four entries are followed by # scan", "See the V16 format # for a version storing each", "Nr of columns of slice image matrix # Expected binary", "open(filename, 'rb') as f: # --------------------------------------------------------------------- # VMR Pre-Data Header", "2, 1)) # BV to Tal data_img = data_img[::-1, ::-1,", "data: float (4 bytes) data, = struct.unpack('<f', f.read(4)) header[\"Slice1CenterX\"] =", "data: char (1 byte) data = header[\"LeftRightConvention\"] f.write(struct.pack('<B', data)) data", "is stored. The # \"type of transformation\" is a value", "performed to the original data set (\"history record\"). The #", "2 and higher contain # additional header information after the", "variable-length string data = read_variable_length_string(f) header[\"PastTransformation\"][i][\"Name\"] = data # Expected", "data element (intensity value) is # represented in 1 byte.", "dimension as well as a value indicating the size of", "struct.unpack('<B', f.read(1)) header[\"VoxelResolutionInTALmm\"] = data # Expected binary data: int", "for j in range(header[\"PastTransformation\"][i][\"NrOfValues\"]): # Expected binary data: float (4", "binary data: int (4 bytes) data = header[\"NRows\"] f.write(struct.pack('<i', data))", "information after the actual data (\"post-data # header\"). This allows", "# --------------------------------------------------------------------- # VMR Data # --------------------------------------------------------------------- # Convert axes", "data data, = struct.unpack('<h', f.read(2)) header[\"OffsetY\"] = data data, =", "string Path to file. 
Returns ------- header : dictionary Pre-data", "the size of a cube with # iso-dimensions to which", "version \"1\", file versions 2 and higher contain # additional", "in Tal space # BV (Z left -> right) [axis", "data_img = np.transpose(data_img, (0, 2, 1)) # BV to Tal", "data: float (4 bytes) data, = struct.unpack('<f', f.read(4)) header[\"FoVRows\"] =", "float (4 bytes) data = header[\"FoVRows\"] f.write(struct.pack('<f', data)) data =", "# VMR Pre-Data Header # --------------------------------------------------------------------- # NOTE(Developer Guide 2.6):", "data)) data = header[\"Slice1CenterZ\"] f.write(struct.pack('<f', data)) data = header[\"SliceNCenterX\"] f.write(struct.pack('<f',", "struct.unpack('<f', f.read(4)) header[\"RowDirY\"] = data # Slice row direction vector", "data mean intensity data, = struct.unpack('<i', f.read(4)) header[\"VMROrigV16MaxValue\"] = data", "data = header[\"FoVCols\"] f.write(struct.pack('<f', data)) data = header[\"SliceThickness\"] f.write(struct.pack('<f', data))", "# Nr of columns of slice image matrix # Expected", "of columns of slice image matrix # Expected binary data:", "after np.reshape] = Z in Tal space # BV (Z", "axes terminology follows the internal BrainVoyager (BV) format. # The", "np.zeros((header[\"DimZ\"] * header[\"DimY\"] * header[\"DimX\"]), dtype=\"<B\") for i in range(data_img.size):", "data = header[\"VMROrigV16MeanValue\"] f.write(struct.pack('<i', data)) data = header[\"VMROrigV16MaxValue\"] f.write(struct.pack('<i', data))", "data: float (4 bytes) data, = struct.unpack('<f', f.read(4)) header[\"VoxelSizeX\"] =", "data. \"\"\" header = dict() with open(filename, 'rb') as f:", "bytes (short # integers). The VMR format contains a small", "= data # Last slice center X coordinate data, =", "f.read(4)) header[\"NrOfPastSpatialTransformations\"] = data if header[\"NrOfPastSpatialTransformations\"] != 0: # NOTE(Developer", "intensity value with two bytes (short # integers). The VMR", "contain offset values for # each dimension as well as", "of rows of slice image matrix data, = struct.unpack('<i', f.read(4))", "Tal # Expected binary data: unsigned char (1 byte) data_img", "follow the DICOM # standard. Then follows eventually a section", "is \"4\", which is only slightly different from # version", "# BV to Tal # Expected binary data: unsigned char", "Guide 2.6): These four entries have been added in #", "# performed to the original data set (\"history record\"). The", "column dir. [mm] data, = struct.unpack('<f', f.read(4)) header[\"SliceThickness\"] = data", "post-data header contains position information (if available) # and stores", "VMR Pre-Data Header # --------------------------------------------------------------------- # NOTE(Developer Guide 2.6): VMR", "This allows to read VMR data sets with minimal header", "as a list trans_values = [] for j in range(header[\"PastTransformation\"][i][\"NrOfValues\"]):", "center X coordinate data, = struct.unpack('<f', f.read(4)) header[\"SliceNCenterY\"] = data", "transformation # \"5\": Un-Talairach transformation (1 - 5 -> BV", "sets efficiently, e.g. in # the context of advanced segmentation", "= data # Voxel resolution along X axis data, =", "Y in Tal space # BV (Y top -> bottom)", "be internally \"expanded\" # for certain operations. 
The axes labels", "struct.unpack('<H', f.read(2)) header[\"DimZ\"] = data # --------------------------------------------------------------------- # VMR Data", "header[\"OffsetY\"] = data data, = struct.unpack('<h', f.read(2)) header[\"OffsetZ\"] = data", "indicated below). Version 3 added offset values to # format", "f.write(struct.pack('<f', data)) data = header[\"FoVCols\"] f.write(struct.pack('<f', data)) data = header[\"SliceThickness\"]", "struct.unpack('<f', f.read(4)) header[\"RowDirZ\"] = data # Slice row direction vector", "bytes) data, = struct.unpack('<h', f.read(2)) header[\"OffsetX\"] = data data, =", "f.write(struct.pack('<h', data)) data = header[\"FramingCubeDim\"] f.write(struct.pack('<h', data)) # Expected binary", "--------------------------------------------------------------------- # VMR Post-Data Header # --------------------------------------------------------------------- if header[\"File version\"]", "header[\"FoVCols\"] f.write(struct.pack('<f', data)) data = header[\"SliceThickness\"] f.write(struct.pack('<f', data)) data =", "data)) data = header[\"SliceNCenterX\"] f.write(struct.pack('<f', data)) data = header[\"SliceNCenterY\"] f.write(struct.pack('<f',", "Expected binary data: float (4 bytes) data = header[\"Slice1CenterX\"] f.write(struct.pack('<f',", "char (1 byte) data = header[\"LeftRightConvention\"] f.write(struct.pack('<B', data)) data =", "(\"history record\"). The # post-header data can be probably ignored", "write Brainvoyager VMR file. Parameters ---------- filename : string Output", "Slice column direction vector X component data, = struct.unpack('<f', f.read(4))", "minimal header # checking if the extended information is not", "in range(header[\"NrOfPastSpatialTransformations\"]): header[\"PastTransformation\"].append(dict()) # Expected binary data: variable-length string data", "data = read_variable_length_string(f) header[\"PastTransformation\"][i][\"Name\"] = data # Expected binary data:", "3 rotation, 3 scale) # \"2\": Affine transformation (16 values,", "data, = struct.unpack('<f', f.read(4)) header[\"GapThickness\"] = data # Gap thickness", "bytes) data = header[\"PastTransformation\"][i][\"Type\"] f.write(struct.pack('<i', data)) # Expected binary data:", "data: float (4 bytes) data, = struct.unpack('<f', f.read(4)) trans_values.append(data) header[\"PastTransformation\"][i][\"Values\"]", "more extensive, header. The current # version of VMR files", "header[\"SliceNCenterY\"] f.write(struct.pack('<f', data)) data = header[\"SliceNCenterZ\"] f.write(struct.pack('<f', data)) data =", "(e.g. Talairach after # normalization) and voxel resolution. if header[\"File", "file. Parameters ---------- filename : string Path to file. Returns", "data data, = struct.unpack('<H', f.read(2)) header[\"DimY\"] = data data, =", "(0, 2, 1)) # BV to Tal # Expected binary", "version storing each intensity value with two bytes (short #", "data = header[\"File version\"] f.write(struct.pack('<H', data)) data = header[\"DimX\"] f.write(struct.pack('<H',", "for proper visualization. 
# Expected binary data: unsigned short int", "header[\"OffsetZ\"] = data data, = struct.unpack('<h', f.read(2)) header[\"FramingCubeDim\"] = data", "data: int (4 bytes) data = header[\"NRows\"] f.write(struct.pack('<i', data)) data", "data data, = struct.unpack('<h', f.read(2)) header[\"OffsetZ\"] = data data, =", "Last slice center X coordinate data, = struct.unpack('<f', f.read(4)) header[\"SliceNCenterY\"]", "struct.unpack('<f', f.read(4)) header[\"GapThickness\"] = data # Gap thickness [mm] #", "create the # current VMR (e.g. ACPC transformation). Finally, additional", ">= 3: # Expected binary data: short int (2 bytes)", "right) [axis 0 after np.reshape] = X in Tal space", "stored as a series of bytes. See the V16 format", "which is only slightly different from # version 3 (as", "# First slice center X coordinate data, = struct.unpack('<f', f.read(4))", "write_variable_length_string(f, data) # Expected binary data: int (4 bytes) data", "version \"3\" with BrainVoyager QX 1.7. All other entries are", "vector Z component # Expected binary data: int (4 bytes)", "struct.unpack('<i', f.read(4)) header[\"VMROrigV16MeanValue\"] = data # 16-bit data mean intensity", "f.write(struct.pack('<i', data)) data = header[\"VMROrigV16MeanValue\"] f.write(struct.pack('<i', data)) data = header[\"VMROrigV16MaxValue\"]", "three loops: # DimZ # DimY # DimX # #", "data: int (4 bytes) data, = struct.unpack('<i', f.read(4)) header[\"PastTransformation\"][i][\"NrOfValues\"] =", "header[\"DimX\"]), dtype=\"<B\") for i in range(data_img.size): data_img[i], = struct.unpack('<B', f.read(1))", "header[\"File version\"] >= 3: # Expected binary data: short int", "3 (as indicated below). Version 3 added offset values to", "a value indicating the size of a cube with #", "of a cube with # iso-dimensions to which the data", "Slice column direction vector Z component # Expected binary data:", "BrainVoyager (BV) format. # The mapping to Talairach axes is", "Expected binary data: variable-length string data = read_variable_length_string(f) header[\"PastTransformation\"][i][\"Name\"] =", "data, = struct.unpack('<f', f.read(4)) header[\"ColDirY\"] = data # Slice column", "f.write(struct.pack('<f', data)) data = header[\"RowDirZ\"] f.write(struct.pack('<f', data)) data = header[\"ColDirX\"]", "each intensity value with two bytes (short # integers). 
The", "= Y in Tal space # BV (Y top ->", "3 scale) # \"2\": Affine transformation (16 values, 4x4 matrix)", "with # iso-dimensions to which the data set will be", "data, = struct.unpack('<i', f.read(4)) header[\"VMROrigV16MeanValue\"] = data # 16-bit data", "byte) data = header[\"LeftRightConvention\"] f.write(struct.pack('<B', data)) data = header[\"ReferenceSpaceVMR\"] f.write(struct.pack('<B',", "slice image matrix data, = struct.unpack('<i', f.read(4)) header[\"NCols\"] = data", "(1 - 5 -> BV axes) header[\"PastTransformation\"] = [] for", "Z coordinate data, = struct.unpack('<f', f.read(4)) header[\"SliceNCenterX\"] = data #", "data_img, (header[\"DimZ\"], header[\"DimY\"], header[\"DimX\"])) data_img = np.transpose(data_img, (0, 2, 1))", "# Expected binary data: unsigned char (1 byte) data_img =", "loops: # DimZ # DimY # DimX # # The", "= data data, = struct.unpack('<H', f.read(2)) header[\"DimZ\"] = data #", "range(data_img.size): f.write(struct.pack('<B', data_img[i])) # --------------------------------------------------------------------- # VMR Post-Data Header #", "front -> back) [axis 2 after np.reshape] = Y in", "Slice column direction vector Y component data, = struct.unpack('<f', f.read(4))", "char (1 byte) data, = struct.unpack('<B', f.read(1)) header[\"LeftRightConvention\"] = data", "BrainVoyager's internal format. These four entries are followed by #", "---------- filename : string Output filename. header : dictionary Header", "# Expected binary data: char (1 byte) data, = struct.unpack('<B',", "# Expected binary data: int (4 bytes) data = header[\"PastTransformation\"][i][\"NrOfValues\"]", "custom routines, but is # important in BrainVoyager QX for", "data, = struct.unpack('<h', f.read(2)) header[\"FramingCubeDim\"] = data # Expected binary", "data: variable-length string data = read_variable_length_string(f) header[\"PastTransformation\"][i][\"Name\"] = data #", "original file version \"1\", file versions 2 and higher contain", "data data, = struct.unpack('<h', f.read(2)) header[\"FramingCubeDim\"] = data # Expected", "added offset values to # format 2 in order to", "= struct.unpack('<i', f.read(4)) header[\"NrOfPastSpatialTransformations\"] = data if header[\"NrOfPastSpatialTransformations\"] != 0:", "# Field of view extent in row direction [mm] data,", "binary data: short int (2 bytes) data, = struct.unpack('<h', f.read(2))", "the whole brain (head) of subjects. The # intensity values", "row direction vector Y component data, = struct.unpack('<f', f.read(4)) header[\"RowDirZ\"]", "data # Voxel resolution along X axis data, = struct.unpack('<f',", "entries have been added in # file version \"3\" with", "for certain operations. The axes labels are in terms of", "# Slice column direction vector X component data, = struct.unpack('<f',", "Talairach transformation # \"5\": Un-Talairach transformation (1 - 5 ->", "axis data, = struct.unpack('<f', f.read(4)) header[\"VoxelSizeY\"] = data # Voxel", "(1 byte) data = header[\"LeftRightConvention\"] f.write(struct.pack('<B', data)) data = header[\"ReferenceSpaceVMR\"]", "(BV) format. 
# The mapping to Talairach axes is as", "The # post-header data can be probably ignored for custom", "direction vector Z component data, = struct.unpack('<f', f.read(4)) header[\"ColDirX\"] =", "data # Slice column direction vector X component data, =", "coordinate data, = struct.unpack('<f', f.read(4)) header[\"SliceNCenterY\"] = data # Last", "for i in range(header[\"NrOfPastSpatialTransformations\"]): header[\"PastTransformation\"].append(dict()) # Expected binary data: variable-length", "header[\"RowDirZ\"] f.write(struct.pack('<f', data)) data = header[\"ColDirX\"] f.write(struct.pack('<f', data)) data =", "file version \"3\" and contain offset values for # each", "entries are not in # terms of BrainVoyager's internal conventions", "header[\"Slice1CenterZ\"] = data # First slice center Z coordinate data,", "16-bit data min intensity data, = struct.unpack('<i', f.read(4)) header[\"VMROrigV16MeanValue\"] =", "VMR Pre-Data Header # --------------------------------------------------------------------- # Expected binary data: unsigned", "============================================================================= def write_vmr(filename, header, data_img): \"\"\"Protocol to write Brainvoyager VMR", "header[\"PosInfosVerified\"] f.write(struct.pack('<i', data)) data = header[\"CoordinateSystem\"] f.write(struct.pack('<i', data)) # Expected", "f.write(struct.pack('<H', data)) data = header[\"DimZ\"] f.write(struct.pack('<H', data)) # --------------------------------------------------------------------- #", "binary data: int (4 bytes) data, = struct.unpack('<i', f.read(4)) header[\"PastTransformation\"][i][\"NrOfValues\"]", "= [] for j in range(header[\"PastTransformation\"][i][\"NrOfValues\"]): # Expected binary data:", "float (4 bytes) f.write(struct.pack('<f', trans_values[j])) # Expected binary data: char", "binary data: short int (2 bytes) data = header[\"OffsetX\"] f.write(struct.pack('<h',", "data # Nr of columns of slice image matrix #", "contain anatomical 3D data sets, # typically containing the whole", "header[\"DimX\"] f.write(struct.pack('<H', data)) data = header[\"DimY\"] f.write(struct.pack('<H', data)) data =", "# Expected binary data: unsigned short int (2 bytes) data", "BV (Z left -> right) [axis 0 after np.reshape] =", "header[\"ReferenceSpaceVMR\"] = data # new in v4 # Expected binary", "# \"4\": Talairach transformation # \"5\": Un-Talairach transformation (1 -", "to read VMR data sets with minimal header # checking", "string Output filename. header : dictionary Header of VMR file.", "file version \"2\". # Expected binary data: short int (2", "import (read_variable_length_string, write_variable_length_string) # ============================================================================= def read_vmr(filename): \"\"\"Read Brainvoyager VMR", "VMR file. Parameters ---------- filename : string Path to file.", "the internal BrainVoyager (BV) format. # The mapping to Talairach", "certain operations. 
The axes labels are in terms of #", "Expected binary data: char (1 byte) data = header[\"VoxelResolutionVerified\"] f.write(struct.pack('<B',", "direction vector Y component data, = struct.unpack('<f', f.read(4)) header[\"ColDirZ\"] =", "data = header[\"FoVRows\"] f.write(struct.pack('<f', data)) data = header[\"FoVCols\"] f.write(struct.pack('<f', data))", "row direction vector Z component data, = struct.unpack('<f', f.read(4)) header[\"ColDirX\"]", "ignored for custom routines, but is # important in BrainVoyager", "Tal space # Expected binary data: unsigned char (1 byte)", "header[\"VoxelSizeY\"] = data # Voxel resolution along Y axis data,", "slightly different from # version 3 (as indicated below). Version", "(X front -> back) [axis 2 after np.reshape] = Y", "direction vector Y component data, = struct.unpack('<f', f.read(4)) header[\"RowDirZ\"] =", "Talairach after # normalization) and voxel resolution. if header[\"File version\"]", "binary data: float (4 bytes) data, = struct.unpack('<f', f.read(4)) trans_values.append(data)", "# Expected binary data: float (4 bytes) data, = struct.unpack('<f',", "scan position information from the original file headers, e.g. from", "header information after the actual data (\"post-data # header\"). This", "(2 bytes) data, = struct.unpack('<H', f.read(2)) header[\"File version\"] = data", "data_img[i], = struct.unpack('<B', f.read(1)) data_img = np.reshape( data_img, (header[\"DimZ\"], header[\"DimY\"],", "iso-dimensions to which the data set will be internally \"expanded\"", "range(data_img.size): data_img[i], = struct.unpack('<B', f.read(1)) data_img = np.reshape( data_img, (header[\"DimZ\"],", "first four entries of the post-data # header are new", "# Last slice center Y coordinate data, = struct.unpack('<f', f.read(4))", "header followed by the # actual data followed by a", "followed by the # actual data followed by a second,", "# Expected binary data: int (4 bytes) data = header[\"PosInfosVerified\"]", "assumed # left-right convention, the reference space (e.g. Talairach after", "data: char (1 byte) data, = struct.unpack('<B', f.read(1)) header[\"LeftRightConvention\"] =", "format # for a version storing each intensity value with", "data, = struct.unpack('<f', f.read(4)) header[\"ColDirX\"] = data # Slice column", "Expected binary data: variable-length string data = read_variable_length_string(f) header[\"PastTransformation\"][i][\"SourceFileName\"] =", "struct.unpack('<H', f.read(2)) header[\"DimX\"] = data data, = struct.unpack('<H', f.read(2)) header[\"DimY\"]", "bvbabel.utils import (read_variable_length_string, write_variable_length_string) # ============================================================================= def read_vmr(filename): \"\"\"Read Brainvoyager", "binary data: float (4 bytes) data = header[\"VoxelSizeX\"] f.write(struct.pack('<f', data))", "VMR format contains a small header followed by the #", "to BV standard data_img = data_img[::-1, ::-1, ::-1] # Flip", "= struct.unpack('<i', f.read(4)) header[\"NRows\"] = data # Nr of rows", "data, = struct.unpack('<f', f.read(4)) header[\"SliceNCenterX\"] = data # Last slice", "column direction vector Y component data, = struct.unpack('<f', f.read(4)) header[\"ColDirZ\"]", "= data # First slice center X coordinate data, =", "position information from the original file headers, e.g. from #", "DimY # DimX # # The axes terminology follows the", "data = header[\"VoxelSizeZ\"] f.write(struct.pack('<f', data)) # Expected binary data: char", "\"3\" with BrainVoyager QX 1.7. 
All other entries are #", "slice center Y coordinate data, = struct.unpack('<f', f.read(4)) header[\"SliceNCenterZ\"] =", "values define the transformation: # \"1\": Rigid body+scale (3 translation,", "= header[\"FramingCubeDim\"] f.write(struct.pack('<h', data)) # Expected binary data: int (4", "= struct.unpack('<f', f.read(4)) header[\"Slice1CenterZ\"] = data # First slice center", "f.write(struct.pack('<f', data)) data = header[\"RowDirX\"] f.write(struct.pack('<f', data)) data = header[\"RowDirY\"]", "checking if the extended information is not needed. The information", "(2 bytes) data = header[\"File version\"] f.write(struct.pack('<H', data)) data =", "First slice center Y coordinate data, = struct.unpack('<f', f.read(4)) header[\"Slice1CenterZ\"]", "v4 # Expected binary data: float (4 bytes) data, =", "f.read(4)) header[\"VoxelSizeZ\"] = data # Voxel resolution along Z axis", "information specified in the following table is stored. The #", "# DimX # # The axes terminology follows the internal", "These four entries have been added in # file version", "f.write(struct.pack('<f', data)) data = header[\"VoxelSizeZ\"] f.write(struct.pack('<f', data)) # Expected binary", "binary data: char (1 byte) data, = struct.unpack('<B', f.read(1)) header[\"VoxelResolutionVerified\"]", "list trans_values = header[\"PastTransformation\"][i][\"Values\"] for j in range(header[\"PastTransformation\"][i][\"NrOfValues\"]): # Expected", "contains position information (if available) # and stores a series", "a list trans_values = header[\"PastTransformation\"][i][\"Values\"] for j in range(header[\"PastTransformation\"][i][\"NrOfValues\"]): #", "Y axis data, = struct.unpack('<f', f.read(4)) header[\"VoxelSizeZ\"] = data #", "f.write(struct.pack('<i', data)) # Transformation values are stored as a list", "Z component data, = struct.unpack('<f', f.read(4)) header[\"ColDirX\"] = data #", "data, = struct.unpack('<h', f.read(2)) header[\"OffsetZ\"] = data data, = struct.unpack('<h',", "data)) data = header[\"DimX\"] f.write(struct.pack('<H', data)) data = header[\"DimY\"] f.write(struct.pack('<H',", "data_img[::-1, ::-1, ::-1] # Flip BV axes data_img = np.transpose(data_img,", "Compared to the # original file version \"1\", file versions", "# Expected binary data: short int (2 bytes) data, =", "(1 byte) data, = struct.unpack('<B', f.read(1)) header[\"LeftRightConvention\"] = data #", "V16 format # for a version storing each intensity value", "data)) # Transformation values are stored as a list trans_values", "\"2\". # Expected binary data: short int (2 bytes) data,", "stored. The # \"type of transformation\" is a value determining", "data data, = struct.unpack('<B', f.read(1)) header[\"VoxelResolutionInTALmm\"] = data # Expected", "3D data sets, # typically containing the whole brain (head)", "data set (\"history record\"). The # post-header data can be", ">= 3: # NOTE(Developer Guide 2.6): These four entries have", "of VMR file. data_img : numpy.array, 3D Image. \"\"\" with", "to write Brainvoyager VMR file. Parameters ---------- filename : string", "identical to file version \"2\". # Expected binary data: short", "labels are in terms of # BrainVoyager's internal format. 
These", "position information (if available) # and stores a series of", "= data # Slice column direction vector Z component #", "storing each intensity value with two bytes (short # integers).", "i in range(header[\"NrOfPastSpatialTransformations\"]): header[\"PastTransformation\"].append(dict()) # Expected binary data: variable-length string", "format.\"\"\" import struct import numpy as np from bvbabel.utils import", "data, = struct.unpack('<B', f.read(1)) header[\"VoxelResolutionVerified\"] = data data, = struct.unpack('<B',", "from # DICOM files. The coordinate axes labels in these", "= data # First slice center Y coordinate data, =", "BV to Tal # Expected binary data: unsigned char (1", "header[\"PastTransformation\"][i][\"Name\"] = data # Expected binary data: int (4 bytes)", "= header[\"OffsetX\"] f.write(struct.pack('<h', data)) data = header[\"OffsetY\"] f.write(struct.pack('<h', data)) data", "vector X component data, = struct.unpack('<f', f.read(4)) header[\"RowDirY\"] = data", "= struct.unpack('<h', f.read(2)) header[\"OffsetX\"] = data data, = struct.unpack('<h', f.read(2))", "files contain anatomical 3D data sets, # typically containing the", "file version \"1\", file versions 2 and higher contain #", "in 1 byte. The data is organized in three loops:", "data, = struct.unpack('<f', f.read(4)) header[\"RowDirY\"] = data # Slice row", "data, = struct.unpack('<f', f.read(4)) trans_values.append(data) header[\"PastTransformation\"][i][\"Values\"] = trans_values # Expected", "# \"2\": Affine transformation (16 values, 4x4 matrix) # \"4\":", "1 byte. The data is organized in three loops: #", "value) is # represented in 1 byte. The data is", "file version \"3\" with BrainVoyager QX 1.7. All other entries", "post-header data can be probably ignored for custom routines, but", "# checking if the extended information is not needed. The", "float (4 bytes) data, = struct.unpack('<f', f.read(4)) header[\"VoxelSizeX\"] = data", "f.read(4)) header[\"SliceNCenterY\"] = data # Last slice center Y coordinate", "data: unsigned short int (2 bytes) data = header[\"File version\"]", "# # The axes terminology follows the internal BrainVoyager (BV)", "bytes) f.write(struct.pack('<f', trans_values[j])) # Expected binary data: char (1 byte)", "standard data_img = data_img[::-1, ::-1, ::-1] # Flip BV axes", "allows to read VMR data sets with minimal header #", "f.write(struct.pack('<f', data)) data = header[\"ColDirZ\"] f.write(struct.pack('<f', data)) # Expected binary", "= struct.unpack('<f', f.read(4)) header[\"VoxelSizeY\"] = data # Voxel resolution along", "data = header[\"PosInfosVerified\"] f.write(struct.pack('<i', data)) data = header[\"CoordinateSystem\"] f.write(struct.pack('<i', data))", "range(header[\"PastTransformation\"][i][\"NrOfValues\"]): # Expected binary data: float (4 bytes) data, =", "= struct.unpack('<f', f.read(4)) header[\"Slice1CenterY\"] = data # First slice center", "table is stored. 
The # \"type of transformation\" is a", "read_variable_length_string(f) header[\"PastTransformation\"][i][\"Name\"] = data # Expected binary data: int (4", "data, = struct.unpack('<i', f.read(4)) header[\"PastTransformation\"][i][\"Type\"] = data # Expected binary", "# Expected binary data: variable-length string data = read_variable_length_string(f) header[\"PastTransformation\"][i][\"SourceFileName\"]", "offset values for # each dimension as well as a", "f.write(struct.pack('<i', data)) data = header[\"NCols\"] f.write(struct.pack('<i', data)) # Expected binary", "= data # Gap thickness [mm] # Expected binary data:", "data # Slice column direction vector Y component data, =", "= header[\"PosInfosVerified\"] f.write(struct.pack('<i', data)) data = header[\"CoordinateSystem\"] f.write(struct.pack('<i', data)) #", "Expected binary data: char (1 byte) data, = struct.unpack('<B', f.read(1))", "slice center Z coordinate data, = struct.unpack('<f', f.read(4)) header[\"RowDirX\"] =", "v4 data, = struct.unpack('<B', f.read(1)) header[\"ReferenceSpaceVMR\"] = data # new", "as a list trans_values = header[\"PastTransformation\"][i][\"Values\"] for j in range(header[\"PastTransformation\"][i][\"NrOfValues\"]):", "(4 bytes) data = header[\"VMROrigV16MinValue\"] f.write(struct.pack('<i', data)) data = header[\"VMROrigV16MeanValue\"]", "= data # --------------------------------------------------------------------- # VMR Data # --------------------------------------------------------------------- #", "(as indicated below). Version 3 added offset values to #", "header[\"NrOfPastSpatialTransformations\"] != 0: # NOTE(Developer Guide 2.6): For each past", "# normalization) and voxel resolution. if header[\"File version\"] >= 3:", "Nr of rows of slice image matrix data, = struct.unpack('<i',", "header[\"NrOfPastSpatialTransformations\"] f.write(struct.pack('<i', data)) if header[\"NrOfPastSpatialTransformations\"] != 0: for i in", "# coregistration routines as well as for proper visualization. #", "X component data, = struct.unpack('<f', f.read(4)) header[\"RowDirY\"] = data #", "= struct.unpack('<f', f.read(4)) header[\"ColDirY\"] = data # Slice column direction", "trans_values = header[\"PastTransformation\"][i][\"Values\"] for j in range(header[\"PastTransformation\"][i][\"NrOfValues\"]): # Expected binary", "data = header[\"PastTransformation\"][i][\"NrOfValues\"] f.write(struct.pack('<i', data)) # Transformation values are stored", "Tal data_img = data_img[::-1, ::-1, ::-1] # Flip BV axes", "# Expected binary data: int (4 bytes) data = header[\"PastTransformation\"][i][\"Type\"]", "axes from Nifti standard back to BV standard data_img =", "binary data: float (4 bytes) data = header[\"Slice1CenterX\"] f.write(struct.pack('<f', data))", "is only slightly different from # version 3 (as indicated", "(1 byte) data_img = data_img.flatten() for i in range(data_img.size): f.write(struct.pack('<B',", "axes data_img = np.transpose(data_img, (0, 2, 1)) # BV to", "row direction vector X component data, = struct.unpack('<f', f.read(4)) header[\"RowDirY\"]", "data)) data = header[\"FoVCols\"] f.write(struct.pack('<f', data)) data = header[\"SliceThickness\"] f.write(struct.pack('<f',", "bytes) data, = struct.unpack('<i', f.read(4)) header[\"NrOfPastSpatialTransformations\"] = data if header[\"NrOfPastSpatialTransformations\"]", "will be internally \"expanded\" # for certain operations. 
The axes", "view extent in row direction [mm] data, = struct.unpack('<f', f.read(4))", "to Tal # Expected binary data: unsigned char (1 byte)", "= header[\"VoxelSizeY\"] f.write(struct.pack('<f', data)) data = header[\"VoxelSizeZ\"] f.write(struct.pack('<f', data)) #", "Post-Data Header # --------------------------------------------------------------------- if header[\"File version\"] >= 3: #", "routines, but is # important in BrainVoyager QX for spatial", "# information specified in the following table is stored. The", "along X axis data, = struct.unpack('<f', f.read(4)) header[\"VoxelSizeY\"] = data", "np.transpose(data_img, (0, 2, 1)) # BV to Tal data_img =", "data set will be internally \"expanded\" # for certain operations.", "different from # version 3 (as indicated below). Version 3", "data sets, # typically containing the whole brain (head) of", "bytes) data = header[\"PosInfosVerified\"] f.write(struct.pack('<i', data)) data = header[\"CoordinateSystem\"] f.write(struct.pack('<i',", "data)) data = header[\"RowDirZ\"] f.write(struct.pack('<f', data)) data = header[\"ColDirX\"] f.write(struct.pack('<f',", "= header[\"DimY\"] f.write(struct.pack('<H', data)) data = header[\"DimZ\"] f.write(struct.pack('<H', data)) #", "header[\"VMROrigV16MinValue\"] = data # 16-bit data min intensity data, =", "have been eventually performed to create the # current VMR", "data set, including the assumed # left-right convention, the reference", "Expected binary data: float (4 bytes) data, = struct.unpack('<f', f.read(4))", "four entries of the post-data # header are new since", "Post-Data Header # --------------------------------------------------------------------- # NOTE(Developer Guide 2.6): The first", "data = header[\"RowDirX\"] f.write(struct.pack('<f', data)) data = header[\"RowDirY\"] f.write(struct.pack('<f', data))", "= header[\"NrOfPastSpatialTransformations\"] f.write(struct.pack('<i', data)) if header[\"NrOfPastSpatialTransformations\"] != 0: for i", "# scan position information from the original file headers, e.g.", "struct.unpack('<i', f.read(4)) header[\"PastTransformation\"][i][\"NrOfValues\"] = data # Store transformation values as", "[mm] data, = struct.unpack('<f', f.read(4)) header[\"SliceThickness\"] = data # Slice", "filename. header : dictionary Header of VMR file. data_img :", "# The axes terminology follows the internal BrainVoyager (BV) format.", "values are stored as a list trans_values = header[\"PastTransformation\"][i][\"Values\"] for", "have been # performed to the original data set (\"history", "binary data: float (4 bytes) data = header[\"FoVRows\"] f.write(struct.pack('<f', data))", "struct.unpack('<h', f.read(2)) header[\"OffsetY\"] = data data, = struct.unpack('<h', f.read(2)) header[\"OffsetZ\"]", "extensive, header. 
The current # version of VMR files is", "data, = struct.unpack('<f', f.read(4)) header[\"VoxelSizeX\"] = data # Voxel resolution", "[mm] # Expected binary data: int (4 bytes) data, =", "as well as a value indicating the size of a", "= struct.unpack('<H', f.read(2)) header[\"DimY\"] = data data, = struct.unpack('<H', f.read(2))", "data # --------------------------------------------------------------------- # VMR Data # --------------------------------------------------------------------- # NOTE(Developer", "Y coordinate data, = struct.unpack('<f', f.read(4)) header[\"Slice1CenterZ\"] = data #", "header[\"OffsetY\"] f.write(struct.pack('<h', data)) data = header[\"OffsetZ\"] f.write(struct.pack('<h', data)) data =", "2.6): The first four entries of the post-data # header", "bytes) data, = struct.unpack('<i', f.read(4)) header[\"PastTransformation\"][i][\"NrOfValues\"] = data # Store", "transformation (1 - 5 -> BV axes) header[\"PastTransformation\"] = []", "the context of advanced segmentation processing. Compared to the #", "bytes) data, = struct.unpack('<f', f.read(4)) trans_values.append(data) header[\"PastTransformation\"][i][\"Values\"] = trans_values #", "The information # in the post-data header contains position information", "f.write(struct.pack('<H', data)) data = header[\"DimY\"] f.write(struct.pack('<H', data)) data = header[\"DimZ\"]", "2 after np.reshape] = Y in Tal space # BV", "if header[\"File version\"] >= 3: # Expected binary data: short", "data can be probably ignored for custom routines, but is", "f.read(1)) header[\"LeftRightConvention\"] = data # modified in v4 data, =", "(4 bytes) data, = struct.unpack('<f', f.read(4)) trans_values.append(data) header[\"PastTransformation\"][i][\"Values\"] = trans_values", "First slice center Z coordinate data, = struct.unpack('<f', f.read(4)) header[\"SliceNCenterX\"]", "header[\"File version\"] >= 3: # NOTE(Developer Guide 2.6): These four", "variable-length string data = header[\"PastTransformation\"][i][\"SourceFileName\"] write_variable_length_string(f, data) # Expected binary", "range(header[\"NrOfPastSpatialTransformations\"]): header[\"PastTransformation\"].append(dict()) # Expected binary data: variable-length string data =", "struct.unpack('<f', f.read(4)) header[\"SliceNCenterX\"] = data # Last slice center X", "f.write(struct.pack('<f', data)) data = header[\"SliceNCenterY\"] f.write(struct.pack('<f', data)) data = header[\"SliceNCenterZ\"]", "represented in 1 byte. The data is organized in three", "binary data: variable-length string data = read_variable_length_string(f) header[\"PastTransformation\"][i][\"Name\"] = data", "= struct.unpack('<f', f.read(4)) header[\"VoxelSizeX\"] = data # Voxel resolution along", "resolution. if header[\"File version\"] >= 3: # NOTE(Developer Guide 2.6):", "Brainvoyager VMR file. Parameters ---------- filename : string Path to", "= header[\"Slice1CenterY\"] f.write(struct.pack('<f', data)) data = header[\"Slice1CenterZ\"] f.write(struct.pack('<f', data)) data", "data = header[\"VoxelResolutionVerified\"] f.write(struct.pack('<B', data)) data = header[\"VoxelResolutionInTALmm\"] f.write(struct.pack('<B', data))", "translation, 3 rotation, 3 scale) # \"2\": Affine transformation (16", "struct.unpack('<h', f.read(2)) header[\"OffsetX\"] = data data, = struct.unpack('<h', f.read(2)) header[\"OffsetY\"]", "(0, 2, 1)) # BV to Tal data_img = data_img[::-1,", "to create the # current VMR (e.g. ACPC transformation). Finally,", "# header\"). This allows to read VMR data sets with", "# integers). 
The VMR format contains a small header followed", "data = header[\"PastTransformation\"][i][\"Name\"] write_variable_length_string(f, data) # Expected binary data: int", "visualization. # Expected binary data: unsigned short int (2 bytes)", "header[\"VoxelSizeY\"] f.write(struct.pack('<f', data)) data = header[\"VoxelSizeZ\"] f.write(struct.pack('<f', data)) # Expected", "by a second, more extensive, header. The current # version", "center Y coordinate data, = struct.unpack('<f', f.read(4)) header[\"SliceNCenterZ\"] = data", "values to # format 2 in order to represent large", "to which the data set will be internally \"expanded\" #", "Header # --------------------------------------------------------------------- if header[\"File version\"] >= 3: # Expected", "actual data followed by a second, more extensive, header. The", "containing the whole brain (head) of subjects. The # intensity", "current # version of VMR files is \"4\", which is", "for i in range(header[\"NrOfPastSpatialTransformations\"]): # Expected binary data: variable-length string", "Image data. \"\"\" header = dict() with open(filename, 'rb') as", "= header[\"OffsetY\"] f.write(struct.pack('<h', data)) data = header[\"OffsetZ\"] f.write(struct.pack('<h', data)) data", "header[\"NrOfPastSpatialTransformations\"] != 0: for i in range(header[\"NrOfPastSpatialTransformations\"]): # Expected binary", "descries the data set, including the assumed # left-right convention,", "for i in range(data_img.size): data_img[i], = struct.unpack('<B', f.read(1)) data_img =", "= header[\"PastTransformation\"][i][\"Values\"] for j in range(header[\"PastTransformation\"][i][\"NrOfValues\"]): # Expected binary data:", "the # actual data followed by a second, more extensive,", "* header[\"DimX\"]), dtype=\"<B\") for i in range(data_img.size): data_img[i], = struct.unpack('<B',", "= Z in Tal space # BV (Z left ->", "header[\"NCols\"] f.write(struct.pack('<i', data)) # Expected binary data: float (4 bytes)", "-> back) [axis 2 after np.reshape] = Y in Tal", "data)) data = header[\"ColDirZ\"] f.write(struct.pack('<f', data)) # Expected binary data:", "data = header[\"VoxelResolutionInTALmm\"] f.write(struct.pack('<B', data)) # Expected binary data: int", "from bvbabel.utils import (read_variable_length_string, write_variable_length_string) # ============================================================================= def read_vmr(filename): \"\"\"Read", "--------------------------------------------------------------------- # VMR Data # --------------------------------------------------------------------- # Convert axes from", "data_img[i])) # --------------------------------------------------------------------- # VMR Post-Data Header # --------------------------------------------------------------------- if", "# VMR Post-Data Header # --------------------------------------------------------------------- if header[\"File version\"] >=", "= header[\"FoVCols\"] f.write(struct.pack('<f', data)) data = header[\"SliceThickness\"] f.write(struct.pack('<f', data)) data", "= header[\"VMROrigV16MinValue\"] f.write(struct.pack('<i', data)) data = header[\"VMROrigV16MeanValue\"] f.write(struct.pack('<i', data)) data", "--------------------------------------------------------------------- # VMR Pre-Data Header # --------------------------------------------------------------------- # Expected binary", "header[\"DimX\"])) data_img = np.transpose(data_img, (0, 2, 1)) # BV to", "\"5\": Un-Talairach transformation (1 - 5 -> BV axes) header[\"PastTransformation\"]", 
"header[\"Slice1CenterZ\"] f.write(struct.pack('<f', data)) data = header[\"SliceNCenterX\"] f.write(struct.pack('<f', data)) data =", "is not needed. The information # in the post-data header", "\"\"\"Read Brainvoyager VMR file. Parameters ---------- filename : string Path", "typically containing the whole brain (head) of subjects. The #", "is # important in BrainVoyager QX for spatial transformation and", "struct.unpack('<i', f.read(4)) header[\"VMROrigV16MinValue\"] = data # 16-bit data min intensity", "new since file version \"3\" and contain offset values for", "conventions but follow the DICOM # standard. Then follows eventually", "= struct.unpack('<f', f.read(4)) header[\"Slice1CenterX\"] = data # First slice center", "version\"] >= 3: # NOTE(Developer Guide 2.6): These four entries", "# BrainVoyager's internal format. These four entries are followed by", "data: variable-length string data = header[\"PastTransformation\"][i][\"SourceFileName\"] write_variable_length_string(f, data) # Expected", "--------------------------------------------------------------------- # VMR Pre-Data Header # --------------------------------------------------------------------- # NOTE(Developer Guide", "= struct.unpack('<B', f.read(1)) header[\"LeftRightConvention\"] = data # modified in v4", "in column dir. [mm] data, = struct.unpack('<f', f.read(4)) header[\"SliceThickness\"] =", "data = header[\"LeftRightConvention\"] f.write(struct.pack('<B', data)) data = header[\"ReferenceSpaceVMR\"] f.write(struct.pack('<B', data))", "(Y top -> bottom) [axis 1 after np.reshape] = Z", "as well as for proper visualization. # Expected binary data:", ": dictionary Pre-data and post-data headers. data : 3D numpy.array", "header. The current # version of VMR files is \"4\",", "data data, = struct.unpack('<H', f.read(2)) header[\"DimZ\"] = data # ---------------------------------------------------------------------", "data, = struct.unpack('<h', f.read(2)) header[\"OffsetX\"] = data data, = struct.unpack('<h',", "header[\"CoordinateSystem\"] f.write(struct.pack('<i', data)) # Expected binary data: float (4 bytes)", "data = header[\"ColDirZ\"] f.write(struct.pack('<f', data)) # Expected binary data: int", "byte) data_img = np.zeros((header[\"DimZ\"] * header[\"DimY\"] * header[\"DimX\"]), dtype=\"<B\") for", "(header[\"DimZ\"], header[\"DimY\"], header[\"DimX\"])) data_img = np.transpose(data_img, (0, 2, 1)) #", "intensity return header, data_img # ============================================================================= def write_vmr(filename, header, data_img):", "data if header[\"NrOfPastSpatialTransformations\"] != 0: # NOTE(Developer Guide 2.6): For", "f.read(4)) header[\"RowDirX\"] = data # Slice row direction vector X", "dictionary Header of VMR file. data_img : numpy.array, 3D Image.", "important in BrainVoyager QX for spatial transformation and # coregistration", "f: # --------------------------------------------------------------------- # VMR Pre-Data Header # --------------------------------------------------------------------- #", "set, including the assumed # left-right convention, the reference space", "in order to represent large data sets efficiently, e.g. in", "DICOM # standard. Then follows eventually a section listing spatial", "and stores a series of spatial transformations, which might have", "bytes) data, = struct.unpack('<i', f.read(4)) header[\"NRows\"] = data # Nr", "the extended information is not needed. 
The information # in", "are stored as a list trans_values = header[\"PastTransformation\"][i][\"Values\"] for j", "All other entries are # identical to file version \"2\".", "struct.unpack('<f', f.read(4)) header[\"Slice1CenterY\"] = data # First slice center Y", "below). Version 3 added offset values to # format 2", "Expected binary data: float (4 bytes) data = header[\"FoVRows\"] f.write(struct.pack('<f',", "for # each dimension as well as a value indicating", "Guide 2.6): For each past transformation, the # information specified", "numpy.array, 3D Image. \"\"\" with open(filename, 'wb') as f: #", "header[\"ColDirZ\"] = data # Slice column direction vector Z component", "left-right convention, the reference space (e.g. Talairach after # normalization)", "bytes) data, = struct.unpack('<i', f.read(4)) header[\"VMROrigV16MinValue\"] = data # 16-bit", "labels in these entries are not in # terms of", "header[\"VoxelResolutionVerified\"] = data data, = struct.unpack('<B', f.read(1)) header[\"VoxelResolutionInTALmm\"] = data", "four entries are followed by # scan position information from", "header[\"PastTransformation\"].append(dict()) # Expected binary data: variable-length string data = read_variable_length_string(f)", "X component data, = struct.unpack('<f', f.read(4)) header[\"ColDirY\"] = data #", "data sets with minimal header # checking if the extended", "component # Expected binary data: int (4 bytes) data, =", "char (1 byte) data_img = data_img.flatten() for i in range(data_img.size):", "BV to Tal data_img = data_img[::-1, ::-1, ::-1] # Flip", "the transformation: # \"1\": Rigid body+scale (3 translation, 3 rotation,", "= struct.unpack('<f', f.read(4)) header[\"SliceThickness\"] = data # Slice thickness [mm]", "component data, = struct.unpack('<f', f.read(4)) header[\"RowDirY\"] = data # Slice", "BrainVoyager QX for spatial transformation and # coregistration routines as", "data = header[\"NRows\"] f.write(struct.pack('<i', data)) data = header[\"NCols\"] f.write(struct.pack('<i', data))", "struct.unpack('<B', f.read(1)) header[\"LeftRightConvention\"] = data # modified in v4 data,", "Expected binary data: float (4 bytes) data = header[\"VoxelSizeX\"] f.write(struct.pack('<f',", "along Y axis data, = struct.unpack('<f', f.read(4)) header[\"VoxelSizeZ\"] = data", "2.6): VMR files contain anatomical 3D data sets, # typically", "DICOM files. The coordinate axes labels in these entries are", "stores a series of spatial transformations, which might have been", "= data # Nr of columns of slice image matrix", "# Expected binary data: variable-length string data = header[\"PastTransformation\"][i][\"Name\"] write_variable_length_string(f,", "these entries are not in # terms of BrainVoyager's internal", "four entries have been added in # file version \"3\"", "columns of slice image matrix # Expected binary data: float", "the assumed # left-right convention, the reference space (e.g. Talairach", "f.write(struct.pack('<i', data)) data = header[\"CoordinateSystem\"] f.write(struct.pack('<i', data)) # Expected binary", "header[\"CoordinateSystem\"] = data # Expected binary data: float (4 bytes)", "struct.unpack('<i', f.read(4)) header[\"VMROrigV16MaxValue\"] = data # 16-bit data max intensity", "# Expected binary data: int (4 bytes) data = header[\"VMROrigV16MinValue\"]", "dtype=\"<B\") for i in range(data_img.size): data_img[i], = struct.unpack('<B', f.read(1)) data_img", "the following table is stored. 
The # \"type of transformation\"", "f.read(4)) header[\"CoordinateSystem\"] = data # Expected binary data: float (4", "in v4 data, = struct.unpack('<B', f.read(1)) header[\"ReferenceSpaceVMR\"] = data #", "f.read(4)) header[\"NCols\"] = data # Nr of columns of slice", "header[\"GapThickness\"] = data # Gap thickness [mm] # Expected binary", "= data_img.flatten() for i in range(data_img.size): f.write(struct.pack('<B', data_img[i])) # ---------------------------------------------------------------------", "header[\"VoxelResolutionVerified\"] f.write(struct.pack('<B', data)) data = header[\"VoxelResolutionInTALmm\"] f.write(struct.pack('<B', data)) # Expected", "f.write(struct.pack('<B', data_img[i])) # --------------------------------------------------------------------- # VMR Post-Data Header # ---------------------------------------------------------------------", "f.write(struct.pack('<f', data)) # Expected binary data: int (4 bytes) data", "of subjects. The # intensity values are stored as a", "header[\"VoxelSizeZ\"] f.write(struct.pack('<f', data)) # Expected binary data: char (1 byte)", "unsigned char (1 byte) data_img = np.zeros((header[\"DimZ\"] * header[\"DimY\"] *", "DimZ # DimY # DimX # # The axes terminology", "binary data: int (4 bytes) data = header[\"VMROrigV16MinValue\"] f.write(struct.pack('<i', data))", "Guide 2.6): The first four entries of the post-data #", "= struct.unpack('<f', f.read(4)) header[\"VoxelSizeZ\"] = data # Voxel resolution along", "header[\"Slice1CenterX\"] f.write(struct.pack('<f', data)) data = header[\"Slice1CenterY\"] f.write(struct.pack('<f', data)) data =", "= header[\"NRows\"] f.write(struct.pack('<i', data)) data = header[\"NCols\"] f.write(struct.pack('<i', data)) #", "data)) data = header[\"RowDirX\"] f.write(struct.pack('<f', data)) data = header[\"RowDirY\"] f.write(struct.pack('<f',", "component data, = struct.unpack('<f', f.read(4)) header[\"ColDirY\"] = data # Slice", "in row direction [mm] data, = struct.unpack('<f', f.read(4)) header[\"FoVCols\"] =", "operations. The axes labels are in terms of # BrainVoyager's", "data, = struct.unpack('<f', f.read(4)) header[\"Slice1CenterX\"] = data # First slice", "f.read(4)) header[\"FoVRows\"] = data # Field of view extent in", "header[\"PastTransformation\"][i][\"NrOfValues\"] = data # Store transformation values as a list", "f.write(struct.pack('<H', data)) data = header[\"DimX\"] f.write(struct.pack('<H', data)) data = header[\"DimY\"]", "binary data: int (4 bytes) data = header[\"PosInfosVerified\"] f.write(struct.pack('<i', data))", "in v4 # Expected binary data: float (4 bytes) data,", "data)) data = header[\"GapThickness\"] f.write(struct.pack('<f', data)) # Expected binary data:", "Flip BV axes data_img = np.transpose(data_img, (0, 2, 1)) #", "f.write(struct.pack('<h', data)) data = header[\"OffsetY\"] f.write(struct.pack('<h', data)) data = header[\"OffsetZ\"]", "Expected binary data: unsigned short int (2 bytes) data, =", "f.read(4)) header[\"NRows\"] = data # Nr of rows of slice", "slice center Y coordinate data, = struct.unpack('<f', f.read(4)) header[\"Slice1CenterZ\"] =", "from # version 3 (as indicated below). Version 3 added", "f.read(2)) header[\"OffsetY\"] = data data, = struct.unpack('<h', f.read(2)) header[\"OffsetZ\"] =", "Last slice center Z coordinate data, = struct.unpack('<f', f.read(4)) header[\"RowDirX\"]", "1)) # BV to Tal # Expected binary data: unsigned", "(head) of subjects. 
The # intensity values are stored as", "= data_img[::-1, ::-1, ::-1] # Flip BV axes # ---------------------------------------------------------------------", "= data # 16-bit data max intensity return header, data_img", "information further descries the data set, including the assumed #", "(2 bytes) data, = struct.unpack('<h', f.read(2)) header[\"OffsetX\"] = data data,", "f.read(4)) header[\"GapThickness\"] = data # Gap thickness [mm] # Expected", "be probably ignored for custom routines, but is # important", "= data data, = struct.unpack('<i', f.read(4)) header[\"CoordinateSystem\"] = data #", "axes # --------------------------------------------------------------------- # VMR Post-Data Header # --------------------------------------------------------------------- #", "direction vector X component data, = struct.unpack('<f', f.read(4)) header[\"RowDirY\"] =", "data)) data = header[\"ColDirY\"] f.write(struct.pack('<f', data)) data = header[\"ColDirZ\"] f.write(struct.pack('<f',", "column direction vector Z component # Expected binary data: int", "in these entries are not in # terms of BrainVoyager's", "unsigned char (1 byte) data_img = data_img.flatten() for i in", "data, = struct.unpack('<H', f.read(2)) header[\"DimX\"] = data data, = struct.unpack('<H',", "convention, the reference space (e.g. Talairach after # normalization) and", "# Slice column direction vector Y component data, = struct.unpack('<f',", "the # original file version \"1\", file versions 2 and", "header[\"RowDirX\"] f.write(struct.pack('<f', data)) data = header[\"RowDirY\"] f.write(struct.pack('<f', data)) data =", "data = header[\"NCols\"] f.write(struct.pack('<i', data)) # Expected binary data: float", "(4 bytes) data = header[\"PastTransformation\"][i][\"Type\"] f.write(struct.pack('<i', data)) # Expected binary", "write_variable_length_string) # ============================================================================= def read_vmr(filename): \"\"\"Read Brainvoyager VMR file. Parameters", ": 3D numpy.array Image data. \"\"\" header = dict() with", "Pre-Data Header # --------------------------------------------------------------------- # Expected binary data: unsigned short", "(4 bytes) data = header[\"NrOfPastSpatialTransformations\"] f.write(struct.pack('<i', data)) if header[\"NrOfPastSpatialTransformations\"] !=", "::-1] # Flip BV axes # --------------------------------------------------------------------- # VMR Post-Data", "= header[\"SliceThickness\"] f.write(struct.pack('<f', data)) data = header[\"GapThickness\"] f.write(struct.pack('<f', data)) #", "float (4 bytes) data = header[\"VoxelSizeX\"] f.write(struct.pack('<f', data)) data =", "= header[\"ColDirX\"] f.write(struct.pack('<f', data)) data = header[\"ColDirY\"] f.write(struct.pack('<f', data)) data", "# Expected binary data: variable-length string data = read_variable_length_string(f) header[\"PastTransformation\"][i][\"Name\"]", "# terms of BrainVoyager's internal conventions but follow the DICOM", "data: char (1 byte) data = header[\"VoxelResolutionVerified\"] f.write(struct.pack('<B', data)) data", "# 16-bit data min intensity data, = struct.unpack('<i', f.read(4)) header[\"VMROrigV16MeanValue\"]", "Affine transformation (16 values, 4x4 matrix) # \"4\": Talairach transformation", "contain # additional header information after the actual data (\"post-data", "= data # Voxel resolution along Z axis # Expected", "binary data: int (4 bytes) data, = struct.unpack('<i', f.read(4)) header[\"VMROrigV16MinValue\"]", "filename : string Path to file. 
Returns ------- header :", "unsigned short int (2 bytes) data, = struct.unpack('<H', f.read(2)) header[\"File", "standard. Then follows eventually a section listing spatial # transformations", "past transformation, the # information specified in the following table", "f.write(struct.pack('<f', data)) data = header[\"RowDirY\"] f.write(struct.pack('<f', data)) data = header[\"RowDirZ\"]", "spatial transformation and # coregistration routines as well as for", "Pre-Data Header # --------------------------------------------------------------------- # NOTE(Developer Guide 2.6): VMR files", "data)) data = header[\"FramingCubeDim\"] f.write(struct.pack('<h', data)) # Expected binary data:", "data_img[::-1, ::-1, ::-1] # Flip BV axes # --------------------------------------------------------------------- #", "np.reshape( data_img, (header[\"DimZ\"], header[\"DimY\"], header[\"DimX\"])) data_img = np.transpose(data_img, (0, 2,", "= data data, = struct.unpack('<h', f.read(2)) header[\"OffsetZ\"] = data data,", "extent in column dir. [mm] data, = struct.unpack('<f', f.read(4)) header[\"SliceThickness\"]", "Output filename. header : dictionary Header of VMR file. data_img", "terms of BrainVoyager's internal conventions but follow the DICOM #", "Header # --------------------------------------------------------------------- # NOTE(Developer Guide 2.6): The first four", "data, = struct.unpack('<i', f.read(4)) header[\"CoordinateSystem\"] = data # Expected binary", "# VMR Post-Data Header # --------------------------------------------------------------------- # NOTE(Developer Guide 2.6):", "# Last slice center Z coordinate data, = struct.unpack('<f', f.read(4))", "(4 bytes) data = header[\"PastTransformation\"][i][\"NrOfValues\"] f.write(struct.pack('<i', data)) # Transformation values", "# actual data followed by a second, more extensive, header.", "f.read(4)) header[\"VoxelSizeX\"] = data # Voxel resolution along X axis", "int (4 bytes) data, = struct.unpack('<i', f.read(4)) header[\"NrOfPastSpatialTransformations\"] = data", "(4 bytes) data, = struct.unpack('<i', f.read(4)) header[\"PosInfosVerified\"] = data data,", "f.write(struct.pack('<f', trans_values[j])) # Expected binary data: char (1 byte) data", "header[\"FramingCubeDim\"] = data # Expected binary data: int (4 bytes)", "short int (2 bytes) data, = struct.unpack('<H', f.read(2)) header[\"File version\"]", "data = header[\"RowDirY\"] f.write(struct.pack('<f', data)) data = header[\"RowDirZ\"] f.write(struct.pack('<f', data))", "data)) data = header[\"ColDirX\"] f.write(struct.pack('<f', data)) data = header[\"ColDirY\"] f.write(struct.pack('<f',", "short int (2 bytes) data = header[\"OffsetX\"] f.write(struct.pack('<h', data)) data", "(4 bytes) data, = struct.unpack('<i', f.read(4)) header[\"VMROrigV16MinValue\"] = data #", "files. 
The coordinate axes labels in these entries are not", "VMR files contain anatomical 3D data sets, # typically containing", "-> BV axes) header[\"PastTransformation\"] = [] for i in range(header[\"NrOfPastSpatialTransformations\"]):", "f.read(2)) header[\"FramingCubeDim\"] = data # Expected binary data: int (4", "binary data: unsigned short int (2 bytes) data = header[\"File", "data: int (4 bytes) data = header[\"PastTransformation\"][i][\"Type\"] f.write(struct.pack('<i', data)) #", "float (4 bytes) data, = struct.unpack('<f', f.read(4)) trans_values.append(data) header[\"PastTransformation\"][i][\"Values\"] =", "bytes) data = header[\"PastTransformation\"][i][\"NrOfValues\"] f.write(struct.pack('<i', data)) # Transformation values are", "\"\"\" with open(filename, 'wb') as f: # --------------------------------------------------------------------- # VMR", "in Tal space # BV (Y top -> bottom) [axis", "= data # Slice row direction vector Y component data,", "dir. [mm] data, = struct.unpack('<f', f.read(4)) header[\"SliceThickness\"] = data #", "Y component data, = struct.unpack('<f', f.read(4)) header[\"ColDirZ\"] = data #", "bytes) data = header[\"FoVRows\"] f.write(struct.pack('<f', data)) data = header[\"FoVCols\"] f.write(struct.pack('<f',", "# Nr of rows of slice image matrix data, =", "= data # 16-bit data mean intensity data, = struct.unpack('<i',", "Brainvoyager VMR file format.\"\"\" import struct import numpy as np", "after np.reshape] = Y in Tal space # BV (Y", "of slice image matrix # Expected binary data: float (4", "header[\"NCols\"] = data # Nr of columns of slice image", "f.read(1)) header[\"ReferenceSpaceVMR\"] = data # new in v4 # Expected", "header[\"VMROrigV16MeanValue\"] = data # 16-bit data mean intensity data, =", "string data = header[\"PastTransformation\"][i][\"SourceFileName\"] write_variable_length_string(f, data) # Expected binary data:", "eventually a section listing spatial # transformations which have been", "f.write(struct.pack('<f', data)) data = header[\"SliceNCenterZ\"] f.write(struct.pack('<f', data)) data = header[\"RowDirX\"]", "= [] for i in range(header[\"NrOfPastSpatialTransformations\"]): header[\"PastTransformation\"].append(dict()) # Expected binary", "= struct.unpack('<i', f.read(4)) header[\"PastTransformation\"][i][\"NrOfValues\"] = data # Store transformation values", "# for a version storing each intensity value with two", "# and stores a series of spatial transformations, which might", "binary data: char (1 byte) data = header[\"VoxelResolutionVerified\"] f.write(struct.pack('<B', data))", "with BrainVoyager QX 1.7. All other entries are # identical", "of the post-data # header are new since file version", "f.read(2)) header[\"DimY\"] = data data, = struct.unpack('<H', f.read(2)) header[\"DimZ\"] =", "data)) data = header[\"Slice1CenterY\"] f.write(struct.pack('<f', data)) data = header[\"Slice1CenterZ\"] f.write(struct.pack('<f',", "data = header[\"DimY\"] f.write(struct.pack('<H', data)) data = header[\"DimZ\"] f.write(struct.pack('<H', data))", "= header[\"VoxelResolutionInTALmm\"] f.write(struct.pack('<B', data)) # Expected binary data: int (4", "to the # original file version \"1\", file versions 2", "performed to create the # current VMR (e.g. 
ACPC transformation).", "16-bit data mean intensity data, = struct.unpack('<i', f.read(4)) header[\"VMROrigV16MaxValue\"] =", "--------------------------------------------------------------------- # NOTE(Developer Guide 2.6): Each data element (intensity value)", "from the original file headers, e.g. from # DICOM files.", "header[\"FoVRows\"] f.write(struct.pack('<f', data)) data = header[\"FoVCols\"] f.write(struct.pack('<f', data)) data =", "version\"] = data data, = struct.unpack('<H', f.read(2)) header[\"DimX\"] = data", "int (2 bytes) data = header[\"File version\"] f.write(struct.pack('<H', data)) data", "original file headers, e.g. from # DICOM files. The coordinate", "data, = struct.unpack('<H', f.read(2)) header[\"DimY\"] = data data, = struct.unpack('<H',", "header[\"GapThickness\"] f.write(struct.pack('<f', data)) # Expected binary data: int (4 bytes)", "data # Nr of rows of slice image matrix data,", "data = header[\"NrOfPastSpatialTransformations\"] f.write(struct.pack('<i', data)) if header[\"NrOfPastSpatialTransformations\"] != 0: for", "transformation, the # information specified in the following table is", "# ============================================================================= def read_vmr(filename): \"\"\"Read Brainvoyager VMR file. Parameters ----------", "# BV (X front -> back) [axis 2 after np.reshape]", "5 -> BV axes) header[\"PastTransformation\"] = [] for i in", "data = header[\"OffsetX\"] f.write(struct.pack('<h', data)) data = header[\"OffsetY\"] f.write(struct.pack('<h', data))", "in Tal space # Expected binary data: unsigned char (1", "# typically containing the whole brain (head) of subjects. The", "binary data: unsigned char (1 byte) data_img = np.zeros((header[\"DimZ\"] *", "mean intensity data, = struct.unpack('<i', f.read(4)) header[\"VMROrigV16MaxValue\"] = data #", "f.read(4)) header[\"ColDirY\"] = data # Slice column direction vector Y", "# --------------------------------------------------------------------- # VMR Post-Data Header # --------------------------------------------------------------------- # NOTE(Developer", "data: short int (2 bytes) data, = struct.unpack('<h', f.read(2)) header[\"OffsetX\"]", "Pre-data and post-data headers. data : 3D numpy.array Image data.", "data = header[\"VMROrigV16MinValue\"] f.write(struct.pack('<i', data)) data = header[\"VMROrigV16MeanValue\"] f.write(struct.pack('<i', data))", "other entries are # identical to file version \"2\". #", "create Brainvoyager VMR file format.\"\"\" import struct import numpy as", "= np.zeros((header[\"DimZ\"] * header[\"DimY\"] * header[\"DimX\"]), dtype=\"<B\") for i in", "values are stored as a series of bytes. See the", "variable-length string data = header[\"PastTransformation\"][i][\"Name\"] write_variable_length_string(f, data) # Expected binary", "as np from bvbabel.utils import (read_variable_length_string, write_variable_length_string) # ============================================================================= def", "list trans_values = [] for j in range(header[\"PastTransformation\"][i][\"NrOfValues\"]): # Expected", "trans_values[j])) # Expected binary data: char (1 byte) data =", "header : dictionary Header of VMR file. data_img : numpy.array,", "(\"post-data # header\"). This allows to read VMR data sets", "header[\"VoxelSizeX\"] f.write(struct.pack('<f', data)) data = header[\"VoxelSizeY\"] f.write(struct.pack('<f', data)) data =", "int (2 bytes) data, = struct.unpack('<h', f.read(2)) header[\"OffsetX\"] = data", "header\"). 
This allows to read VMR data sets with minimal", "data_img): \"\"\"Protocol to write Brainvoyager VMR file. Parameters ---------- filename", "np.transpose(data_img, (0, 2, 1)) # BV to Tal # Expected", "struct.unpack('<f', f.read(4)) header[\"FoVCols\"] = data # Field of view extent", "= header[\"ReferenceSpaceVMR\"] f.write(struct.pack('<B', data)) # Expected binary data: float (4", "Z component # Expected binary data: int (4 bytes) data,", "scale) # \"2\": Affine transformation (16 values, 4x4 matrix) #", "binary data: variable-length string data = header[\"PastTransformation\"][i][\"SourceFileName\"] write_variable_length_string(f, data) #", "struct.unpack('<B', f.read(1)) header[\"ReferenceSpaceVMR\"] = data # new in v4 #", "(if available) # and stores a series of spatial transformations,", "data = read_variable_length_string(f) header[\"PastTransformation\"][i][\"SourceFileName\"] = data # Expected binary data:", "NOTE(Developer Guide 2.6): Each data element (intensity value) is #", "(16 values, 4x4 matrix) # \"4\": Talairach transformation # \"5\":", "data # Voxel resolution along Y axis data, = struct.unpack('<f',", "thickness [mm] data, = struct.unpack('<f', f.read(4)) header[\"GapThickness\"] = data #", "= header[\"PastTransformation\"][i][\"SourceFileName\"] write_variable_length_string(f, data) # Expected binary data: int (4", "\"expanded\" # for certain operations. The axes labels are in", "data: float (4 bytes) data = header[\"Slice1CenterX\"] f.write(struct.pack('<f', data)) data", "data = header[\"ColDirY\"] f.write(struct.pack('<f', data)) data = header[\"ColDirZ\"] f.write(struct.pack('<f', data))", "(4 bytes) f.write(struct.pack('<f', trans_values[j])) # Expected binary data: char (1", "data: int (4 bytes) data, = struct.unpack('<i', f.read(4)) header[\"NrOfPastSpatialTransformations\"] =", "data, = struct.unpack('<f', f.read(4)) header[\"RowDirZ\"] = data # Slice row", "# format 2 in order to represent large data sets", "= struct.unpack('<i', f.read(4)) header[\"VMROrigV16MaxValue\"] = data # 16-bit data max", "2 in order to represent large data sets efficiently, e.g.", "data # Last slice center X coordinate data, = struct.unpack('<f',", "= struct.unpack('<f', f.read(4)) header[\"SliceNCenterZ\"] = data # Last slice center", "# represented in 1 byte. 
The data is organized in", "The current # version of VMR files is \"4\", which", "# Expected binary data: float (4 bytes) f.write(struct.pack('<f', trans_values[j])) #", "Expected binary data: char (1 byte) data = header[\"LeftRightConvention\"] f.write(struct.pack('<B',", "data)) data = header[\"RowDirY\"] f.write(struct.pack('<f', data)) data = header[\"RowDirZ\"] f.write(struct.pack('<f',", "= header[\"VoxelResolutionVerified\"] f.write(struct.pack('<B', data)) data = header[\"VoxelResolutionInTALmm\"] f.write(struct.pack('<B', data)) #", "# Slice row direction vector Z component data, = struct.unpack('<f',", "data # Field of view extent in row direction [mm]", "short int (2 bytes) data, = struct.unpack('<h', f.read(2)) header[\"OffsetX\"] =", "= struct.unpack('<i', f.read(4)) header[\"PosInfosVerified\"] = data data, = struct.unpack('<i', f.read(4))", "# Store transformation values as a list trans_values = []", "= struct.unpack('<h', f.read(2)) header[\"FramingCubeDim\"] = data # Expected binary data:", "int (4 bytes) data = header[\"NRows\"] f.write(struct.pack('<i', data)) data =", "\"1\": Rigid body+scale (3 translation, 3 rotation, 3 scale) #", "byte) data, = struct.unpack('<B', f.read(1)) header[\"LeftRightConvention\"] = data # modified", "of view extent in row direction [mm] data, = struct.unpack('<f',", "Nifti standard back to BV standard data_img = data_img[::-1, ::-1,", "data = header[\"CoordinateSystem\"] f.write(struct.pack('<i', data)) # Expected binary data: float", "data = header[\"Slice1CenterX\"] f.write(struct.pack('<f', data)) data = header[\"Slice1CenterY\"] f.write(struct.pack('<f', data))", "slice center Z coordinate data, = struct.unpack('<f', f.read(4)) header[\"SliceNCenterX\"] =", "a version storing each intensity value with two bytes (short", "\"\"\" header = dict() with open(filename, 'rb') as f: #", "Brainvoyager VMR file. Parameters ---------- filename : string Output filename.", "DimX # # The axes terminology follows the internal BrainVoyager", "j in range(header[\"PastTransformation\"][i][\"NrOfValues\"]): # Expected binary data: float (4 bytes)", "= data # Store transformation values as a list trans_values", "along Z axis # Expected binary data: char (1 byte)", "data: unsigned short int (2 bytes) data, = struct.unpack('<H', f.read(2))", "header[\"Slice1CenterY\"] = data # First slice center Y coordinate data,", "resolution along Z axis # Expected binary data: char (1", "= data # Expected binary data: float (4 bytes) data,", "Transformation values are stored as a list trans_values = header[\"PastTransformation\"][i][\"Values\"]", "ACPC transformation). Finally, additional # information further descries the data", "whole brain (head) of subjects. 
The # intensity values are", "# important in BrainVoyager QX for spatial transformation and #", "bytes) data, = struct.unpack('<f', f.read(4)) header[\"VoxelSizeX\"] = data # Voxel", "as follows: # BV (X front -> back) [axis 2", "of spatial transformations, which might have been # performed to", "modified in v4 data, = struct.unpack('<B', f.read(1)) header[\"ReferenceSpaceVMR\"] = data", "NOTE(Developer Guide 2.6): VMR files contain anatomical 3D data sets,", "header[\"File version\"] f.write(struct.pack('<H', data)) data = header[\"DimX\"] f.write(struct.pack('<H', data)) data", "Header # --------------------------------------------------------------------- # Expected binary data: unsigned short int", "header[\"NRows\"] f.write(struct.pack('<i', data)) data = header[\"NCols\"] f.write(struct.pack('<i', data)) # Expected", "# Expected binary data: float (4 bytes) data = header[\"FoVRows\"]", "bytes) data = header[\"VMROrigV16MinValue\"] f.write(struct.pack('<i', data)) data = header[\"VMROrigV16MeanValue\"] f.write(struct.pack('<i',", "The axes labels are in terms of # BrainVoyager's internal", "follows eventually a section listing spatial # transformations which have", "int (4 bytes) data = header[\"VMROrigV16MinValue\"] f.write(struct.pack('<i', data)) data =", "VMR files is \"4\", which is only slightly different from", "value determining how many # subsequent values define the transformation:", "data data, = struct.unpack('<H', f.read(2)) header[\"DimX\"] = data data, =", "VMR data sets with minimal header # checking if the", "X in Tal space # Expected binary data: unsigned char", "header = dict() with open(filename, 'rb') as f: # ---------------------------------------------------------------------", "intensity data, = struct.unpack('<i', f.read(4)) header[\"VMROrigV16MeanValue\"] = data # 16-bit", "Voxel resolution along Z axis # Expected binary data: char", "Returns ------- header : dictionary Pre-data and post-data headers. data", "f.read(4)) header[\"SliceThickness\"] = data # Slice thickness [mm] data, =", "VMR file. Parameters ---------- filename : string Output filename. header", "string data = read_variable_length_string(f) header[\"PastTransformation\"][i][\"Name\"] = data # Expected binary", "processing. Compared to the # original file version \"1\", file", "---------- filename : string Path to file. Returns ------- header", "= header[\"DimX\"] f.write(struct.pack('<H', data)) data = header[\"DimY\"] f.write(struct.pack('<H', data)) data", "The first four entries of the post-data # header are", "header[\"ColDirY\"] f.write(struct.pack('<f', data)) data = header[\"ColDirZ\"] f.write(struct.pack('<f', data)) # Expected", "The mapping to Talairach axes is as follows: # BV", "set will be internally \"expanded\" # for certain operations. The", "which the data set will be internally \"expanded\" # for", "efficiently, e.g. 
in # the context of advanced segmentation processing.", "data)) data = header[\"SliceNCenterZ\"] f.write(struct.pack('<f', data)) data = header[\"RowDirX\"] f.write(struct.pack('<f',", "data # Expected binary data: variable-length string data = read_variable_length_string(f)", "data)) data = header[\"VMROrigV16MeanValue\"] f.write(struct.pack('<i', data)) data = header[\"VMROrigV16MaxValue\"] f.write(struct.pack('<i',", "NOTE(Developer Guide 2.6): For each past transformation, the # information", "well as a value indicating the size of a cube", "- 5 -> BV axes) header[\"PastTransformation\"] = [] for i", "2.6): These four entries have been added in # file", "min intensity data, = struct.unpack('<i', f.read(4)) header[\"VMROrigV16MeanValue\"] = data #", "are not in # terms of BrainVoyager's internal conventions but", "f.write(struct.pack('<i', data)) data = header[\"VMROrigV16MaxValue\"] f.write(struct.pack('<i', data)) return print(\"VMR saved.\")", "# VMR Data # --------------------------------------------------------------------- # NOTE(Developer Guide 2.6): Each", "data: unsigned char (1 byte) data_img = data_img.flatten() for i", "extent in row direction [mm] data, = struct.unpack('<f', f.read(4)) header[\"FoVCols\"]", "= data # Slice column direction vector Y component data,", "--------------------------------------------------------------------- # VMR Post-Data Header # --------------------------------------------------------------------- # NOTE(Developer Guide", "f.read(4)) header[\"VMROrigV16MinValue\"] = data # 16-bit data min intensity data,", "the reference space (e.g. Talairach after # normalization) and voxel", "vector Y component data, = struct.unpack('<f', f.read(4)) header[\"ColDirZ\"] = data", "record\"). The # post-header data can be probably ignored for", "header[\"FoVCols\"] = data # Field of view extent in column", "data = header[\"Slice1CenterY\"] f.write(struct.pack('<f', data)) data = header[\"Slice1CenterZ\"] f.write(struct.pack('<f', data))", "# 16-bit data max intensity return header, data_img # =============================================================================", "# transformations which have been eventually performed to create the", "BrainVoyager QX 1.7. All other entries are # identical to", "if header[\"File version\"] >= 3: # NOTE(Developer Guide 2.6): These", "# \"5\": Un-Talairach transformation (1 - 5 -> BV axes)", "data)) data = header[\"SliceNCenterY\"] f.write(struct.pack('<f', data)) data = header[\"SliceNCenterZ\"] f.write(struct.pack('<f',", "int (4 bytes) data, = struct.unpack('<i', f.read(4)) header[\"NRows\"] = data", "The VMR format contains a small header followed by the", "struct.unpack('<i', f.read(4)) header[\"PastTransformation\"][i][\"Type\"] = data # Expected binary data: variable-length", "header[\"PastTransformation\"][i][\"SourceFileName\"] write_variable_length_string(f, data) # Expected binary data: int (4 bytes)", "integers). The VMR format contains a small header followed by", "data # Last slice center Y coordinate data, = struct.unpack('<f',", "f.read(4)) header[\"VMROrigV16MaxValue\"] = data # 16-bit data max intensity return", "NOTE(Developer Guide 2.6): These four entries have been added in", "# left-right convention, the reference space (e.g. 
Talairach after #", "= struct.unpack('<f', f.read(4)) trans_values.append(data) header[\"PastTransformation\"][i][\"Values\"] = trans_values # Expected binary", "data: float (4 bytes) f.write(struct.pack('<f', trans_values[j])) # Expected binary data:", "context of advanced segmentation processing. Compared to the # original", "f.read(4)) header[\"PastTransformation\"][i][\"NrOfValues\"] = data # Store transformation values as a", "data, = struct.unpack('<f', f.read(4)) header[\"SliceNCenterZ\"] = data # Last slice", "f.read(4)) header[\"Slice1CenterZ\"] = data # First slice center Z coordinate", "header[\"DimY\"] f.write(struct.pack('<H', data)) data = header[\"DimZ\"] f.write(struct.pack('<H', data)) # ---------------------------------------------------------------------", "(1 byte) data = header[\"VoxelResolutionVerified\"] f.write(struct.pack('<B', data)) data = header[\"VoxelResolutionInTALmm\"]", "::-1, ::-1] # Flip BV axes data_img = np.transpose(data_img, (0,", "data: int (4 bytes) data = header[\"PastTransformation\"][i][\"NrOfValues\"] f.write(struct.pack('<i', data)) #", "headers, e.g. from # DICOM files. The coordinate axes labels", "BrainVoyager's internal conventions but follow the DICOM # standard. Then", "# NOTE(Developer Guide 2.6): VMR files contain anatomical 3D data", "f.read(4)) header[\"ColDirZ\"] = data # Slice column direction vector Z", "byte) data = header[\"VoxelResolutionVerified\"] f.write(struct.pack('<B', data)) data = header[\"VoxelResolutionInTALmm\"] f.write(struct.pack('<B',", "in the post-data header contains position information (if available) #", "have been added in # file version \"3\" with BrainVoyager", "(4 bytes) data, = struct.unpack('<f', f.read(4)) header[\"Slice1CenterX\"] = data #", "# Expected binary data: int (4 bytes) data = header[\"NrOfPastSpatialTransformations\"]", "# --------------------------------------------------------------------- # NOTE(Developer Guide 2.6): The first four entries", "# --------------------------------------------------------------------- if header[\"File version\"] >= 3: # Expected binary", "f.read(4)) header[\"RowDirY\"] = data # Slice row direction vector Y", "struct.unpack('<f', f.read(4)) header[\"Slice1CenterX\"] = data # First slice center X", "(short # integers). The VMR format contains a small header", "= data # modified in v4 data, = struct.unpack('<B', f.read(1))", "header[\"PastTransformation\"][i][\"Type\"] = data # Expected binary data: variable-length string data", "# Slice thickness [mm] data, = struct.unpack('<f', f.read(4)) header[\"GapThickness\"] =", "data)) if header[\"NrOfPastSpatialTransformations\"] != 0: for i in range(header[\"NrOfPastSpatialTransformations\"]): #", "information is not needed. 
The information # in the post-data", "follows: # BV (X front -> back) [axis 2 after", "header[\"SliceThickness\"] = data # Slice thickness [mm] data, = struct.unpack('<f',", "data, = struct.unpack('<B', f.read(1)) header[\"VoxelResolutionInTALmm\"] = data # Expected binary", "post-data # header are new since file version \"3\" and", "data # 16-bit data min intensity data, = struct.unpack('<i', f.read(4))", "data_img = np.zeros((header[\"DimZ\"] * header[\"DimY\"] * header[\"DimX\"]), dtype=\"<B\") for i", "# The mapping to Talairach axes is as follows: #", "# Expected binary data: short int (2 bytes) data =", "\"2\": Affine transformation (16 values, 4x4 matrix) # \"4\": Talairach", "spatial # transformations which have been eventually performed to create", "for i in range(data_img.size): f.write(struct.pack('<B', data_img[i])) # --------------------------------------------------------------------- # VMR", "data: int (4 bytes) data = header[\"PosInfosVerified\"] f.write(struct.pack('<i', data)) data", "* header[\"DimY\"] * header[\"DimX\"]), dtype=\"<B\") for i in range(data_img.size): data_img[i],", "header[\"PosInfosVerified\"] = data data, = struct.unpack('<i', f.read(4)) header[\"CoordinateSystem\"] = data", "= struct.unpack('<h', f.read(2)) header[\"OffsetZ\"] = data data, = struct.unpack('<h', f.read(2))", "= data data, = struct.unpack('<H', f.read(2)) header[\"DimX\"] = data data,", "contains a small header followed by the # actual data", "# Flip BV axes data_img = np.transpose(data_img, (0, 2, 1))", "= read_variable_length_string(f) header[\"PastTransformation\"][i][\"SourceFileName\"] = data # Expected binary data: int", "Expected binary data: unsigned short int (2 bytes) data =", "[axis 2 after np.reshape] = Y in Tal space #", "header[\"ColDirZ\"] f.write(struct.pack('<f', data)) # Expected binary data: int (4 bytes)", "a small header followed by the # actual data followed", "Path to file. Returns ------- header : dictionary Pre-data and", "= data # First slice center Z coordinate data, =", "vector Z component data, = struct.unpack('<f', f.read(4)) header[\"ColDirX\"] = data", "Rigid body+scale (3 translation, 3 rotation, 3 scale) # \"2\":", "Expected binary data: int (4 bytes) data = header[\"PosInfosVerified\"] f.write(struct.pack('<i',", "data, = struct.unpack('<f', f.read(4)) header[\"VoxelSizeZ\"] = data # Voxel resolution", "of advanced segmentation processing. 
Compared to the # original file", "cube with # iso-dimensions to which the data set will", "data, = struct.unpack('<i', f.read(4)) header[\"VMROrigV16MaxValue\"] = data # 16-bit data", "struct.unpack('<f', f.read(4)) header[\"VoxelSizeY\"] = data # Voxel resolution along Y", "= header[\"GapThickness\"] f.write(struct.pack('<f', data)) # Expected binary data: int (4", "BV axes # --------------------------------------------------------------------- # VMR Post-Data Header # ---------------------------------------------------------------------", "Data # --------------------------------------------------------------------- # NOTE(Developer Guide 2.6): Each data element", "# --------------------------------------------------------------------- # VMR Data # --------------------------------------------------------------------- # NOTE(Developer Guide", "char (1 byte) data = header[\"VoxelResolutionVerified\"] f.write(struct.pack('<B', data)) data =", "= struct.unpack('<f', f.read(4)) header[\"GapThickness\"] = data # Gap thickness [mm]", "data # Gap thickness [mm] # Expected binary data: int", "Gap thickness [mm] # Expected binary data: int (4 bytes)", "of slice image matrix data, = struct.unpack('<i', f.read(4)) header[\"NCols\"] =", "header[\"LeftRightConvention\"] = data # modified in v4 data, = struct.unpack('<B',", "header[\"RowDirZ\"] = data # Slice row direction vector Z component", "versions 2 and higher contain # additional header information after", "(1 byte) data_img = np.zeros((header[\"DimZ\"] * header[\"DimY\"] * header[\"DimX\"]), dtype=\"<B\")", "float (4 bytes) data, = struct.unpack('<f', f.read(4)) header[\"FoVRows\"] = data", "f.write(struct.pack('<B', data)) data = header[\"VoxelResolutionInTALmm\"] f.write(struct.pack('<B', data)) # Expected binary", "data # new in v4 # Expected binary data: float", "format. # The mapping to Talairach axes is as follows:", "axes is as follows: # BV (X front -> back)", "= np.transpose(data_img, (0, 2, 1)) # BV to Tal data_img", "struct.unpack('<B', f.read(1)) data_img = np.reshape( data_img, (header[\"DimZ\"], header[\"DimY\"], header[\"DimX\"])) data_img", "= data # Slice row direction vector X component data,", "data: int (4 bytes) data = header[\"NrOfPastSpatialTransformations\"] f.write(struct.pack('<i', data)) if", "not in # terms of BrainVoyager's internal conventions but follow", "# Expected binary data: int (4 bytes) data, = struct.unpack('<i',", "Expected binary data: int (4 bytes) data = header[\"NRows\"] f.write(struct.pack('<i',", "X coordinate data, = struct.unpack('<f', f.read(4)) header[\"Slice1CenterY\"] = data #", "in range(header[\"PastTransformation\"][i][\"NrOfValues\"]): # Expected binary data: float (4 bytes) data,", "X coordinate data, = struct.unpack('<f', f.read(4)) header[\"SliceNCenterY\"] = data #", "Parameters ---------- filename : string Path to file. 
Returns -------", "data, = struct.unpack('<f', f.read(4)) header[\"FoVCols\"] = data # Field of", "binary data: int (4 bytes) data = header[\"NrOfPastSpatialTransformations\"] f.write(struct.pack('<i', data))", "= header[\"DimZ\"] f.write(struct.pack('<H', data)) # --------------------------------------------------------------------- # VMR Data #", "data = header[\"SliceNCenterX\"] f.write(struct.pack('<f', data)) data = header[\"SliceNCenterY\"] f.write(struct.pack('<f', data))", "f.read(2)) header[\"DimZ\"] = data # --------------------------------------------------------------------- # VMR Data #", "data)) data = header[\"VoxelResolutionInTALmm\"] f.write(struct.pack('<B', data)) # Expected binary data:", "matrix data, = struct.unpack('<i', f.read(4)) header[\"NCols\"] = data # Nr", "of # BrainVoyager's internal format. These four entries are followed", "proper visualization. # Expected binary data: unsigned short int (2", "header[\"File version\"] = data data, = struct.unpack('<H', f.read(2)) header[\"DimX\"] =", "transformations which have been eventually performed to create the #", "= header[\"OffsetZ\"] f.write(struct.pack('<h', data)) data = header[\"FramingCubeDim\"] f.write(struct.pack('<h', data)) #", "# Expected binary data: int (4 bytes) data = header[\"NRows\"]", "by the # actual data followed by a second, more", "f.write(struct.pack('<f', data)) data = header[\"Slice1CenterY\"] f.write(struct.pack('<f', data)) data = header[\"Slice1CenterZ\"]", "coordinate axes labels in these entries are not in #", "files is \"4\", which is only slightly different from #", "f.read(4)) header[\"ColDirX\"] = data # Slice column direction vector X", "direction vector X component data, = struct.unpack('<f', f.read(4)) header[\"ColDirY\"] =", "vector Y component data, = struct.unpack('<f', f.read(4)) header[\"RowDirZ\"] = data", "value indicating the size of a cube with # iso-dimensions", "segmentation processing. Compared to the # original file version \"1\",", "header[\"SliceNCenterX\"] = data # Last slice center X coordinate data,", "struct.unpack('<f', f.read(4)) header[\"ColDirX\"] = data # Slice column direction vector", "data, = struct.unpack('<i', f.read(4)) header[\"NRows\"] = data # Nr of", "organized in three loops: # DimZ # DimY # DimX", "file headers, e.g. from # DICOM files. The coordinate axes", "3D Image. \"\"\" with open(filename, 'wb') as f: # ---------------------------------------------------------------------", "header[\"DimY\"] * header[\"DimX\"]), dtype=\"<B\") for i in range(data_img.size): data_img[i], =", "data: float (4 bytes) data = header[\"FoVRows\"] f.write(struct.pack('<f', data)) data", "file. Parameters ---------- filename : string Output filename. header :", "are new since file version \"3\" and contain offset values", "axes labels are in terms of # BrainVoyager's internal format.", "two bytes (short # integers). The VMR format contains a", "header[\"VoxelResolutionInTALmm\"] = data # Expected binary data: int (4 bytes)", "= data # Slice thickness [mm] data, = struct.unpack('<f', f.read(4))", "and post-data headers. data : 3D numpy.array Image data. 
\"\"\"", "data # modified in v4 data, = struct.unpack('<B', f.read(1)) header[\"ReferenceSpaceVMR\"]", "a cube with # iso-dimensions to which the data set", "bottom) [axis 1 after np.reshape] = Z in Tal space", "data is organized in three loops: # DimZ # DimY", "= struct.unpack('<f', f.read(4)) header[\"ColDirX\"] = data # Slice column direction", "subsequent values define the transformation: # \"1\": Rigid body+scale (3", "!= 0: # NOTE(Developer Guide 2.6): For each past transformation,", "data data, = struct.unpack('<i', f.read(4)) header[\"CoordinateSystem\"] = data # Expected", "= header[\"Slice1CenterZ\"] f.write(struct.pack('<f', data)) data = header[\"SliceNCenterX\"] f.write(struct.pack('<f', data)) data", "# Slice row direction vector X component data, = struct.unpack('<f',", "data_img = data_img[::-1, ::-1, ::-1] # Flip BV axes #", "header[\"DimY\"], header[\"DimX\"])) data_img = np.transpose(data_img, (0, 2, 1)) # BV", "data: float (4 bytes) data = header[\"VoxelSizeX\"] f.write(struct.pack('<f', data)) data", "= struct.unpack('<f', f.read(4)) header[\"FoVCols\"] = data # Field of view", "Last slice center Y coordinate data, = struct.unpack('<f', f.read(4)) header[\"SliceNCenterZ\"]", "are # identical to file version \"2\". # Expected binary", "Tal space # BV (Y top -> bottom) [axis 1", "and voxel resolution. if header[\"File version\"] >= 3: # NOTE(Developer", "in the following table is stored. The # \"type of", "file versions 2 and higher contain # additional header information", "data = header[\"ColDirX\"] f.write(struct.pack('<f', data)) data = header[\"ColDirY\"] f.write(struct.pack('<f', data))", "= header[\"NCols\"] f.write(struct.pack('<i', data)) # Expected binary data: float (4", "# DimY # DimX # # The axes terminology follows", "Version 3 added offset values to # format 2 in", "after # normalization) and voxel resolution. if header[\"File version\"] >=", "data, = struct.unpack('<f', f.read(4)) header[\"Slice1CenterZ\"] = data # First slice", "# --------------------------------------------------------------------- # NOTE(Developer Guide 2.6): Each data element (intensity", "with open(filename, 'rb') as f: # --------------------------------------------------------------------- # VMR Pre-Data", "header[\"VMROrigV16MinValue\"] f.write(struct.pack('<i', data)) data = header[\"VMROrigV16MeanValue\"] f.write(struct.pack('<i', data)) data =", "struct.unpack('<f', f.read(4)) header[\"VoxelSizeZ\"] = data # Voxel resolution along Z", "Slice row direction vector X component data, = struct.unpack('<f', f.read(4))", "data, = struct.unpack('<i', f.read(4)) header[\"NCols\"] = data # Nr of", "subjects. The # intensity values are stored as a series", "f.write(struct.pack('<B', data)) # Expected binary data: int (4 bytes) data", "following table is stored. The # \"type of transformation\" is", "::-1] # Flip BV axes data_img = np.transpose(data_img, (0, 2,", "which might have been # performed to the original data", "# NOTE(Developer Guide 2.6): These four entries have been added", "# BV to Tal data_img = data_img[::-1, ::-1, ::-1] #", "resolution along Y axis data, = struct.unpack('<f', f.read(4)) header[\"VoxelSizeZ\"] =", "internal BrainVoyager (BV) format. 
# The mapping to Talairach axes", "axis data, = struct.unpack('<f', f.read(4)) header[\"VoxelSizeZ\"] = data # Voxel", "= struct.unpack('<B', f.read(1)) header[\"VoxelResolutionVerified\"] = data data, = struct.unpack('<B', f.read(1))", "a series of spatial transformations, which might have been #", "direction vector Z component # Expected binary data: int (4", "data = header[\"SliceThickness\"] f.write(struct.pack('<f', data)) data = header[\"GapThickness\"] f.write(struct.pack('<f', data))", "i in range(data_img.size): data_img[i], = struct.unpack('<B', f.read(1)) data_img = np.reshape(", "data: variable-length string data = header[\"PastTransformation\"][i][\"Name\"] write_variable_length_string(f, data) # Expected", "data)) data = header[\"OffsetZ\"] f.write(struct.pack('<h', data)) data = header[\"FramingCubeDim\"] f.write(struct.pack('<h',", "row direction [mm] data, = struct.unpack('<f', f.read(4)) header[\"FoVCols\"] = data", "f.read(4)) header[\"SliceNCenterX\"] = data # Last slice center X coordinate", "# Expected binary data: unsigned short int (2 bytes) data,", "format contains a small header followed by the # actual", "can be probably ignored for custom routines, but is #", "header[\"PastTransformation\"][i][\"Name\"] write_variable_length_string(f, data) # Expected binary data: int (4 bytes)", "header are new since file version \"3\" and contain offset", "= data # Field of view extent in column dir.", "(4 bytes) data, = struct.unpack('<f', f.read(4)) header[\"VoxelSizeX\"] = data #", "# additional header information after the actual data (\"post-data #", "transformation values as a list trans_values = [] for j", "f.write(struct.pack('<f', data)) data = header[\"ColDirY\"] f.write(struct.pack('<f', data)) data = header[\"ColDirZ\"]", "(4 bytes) data = header[\"PosInfosVerified\"] f.write(struct.pack('<i', data)) data = header[\"CoordinateSystem\"]", "data = header[\"GapThickness\"] f.write(struct.pack('<f', data)) # Expected binary data: int", "follows the internal BrainVoyager (BV) format. # The mapping to", "# Field of view extent in column dir. [mm] data,", "= header[\"LeftRightConvention\"] f.write(struct.pack('<B', data)) data = header[\"ReferenceSpaceVMR\"] f.write(struct.pack('<B', data)) #", "column direction vector X component data, = struct.unpack('<f', f.read(4)) header[\"ColDirY\"]", "# new in v4 # Expected binary data: float (4", "data: int (4 bytes) data, = struct.unpack('<i', f.read(4)) header[\"NRows\"] =", "data)) data = header[\"CoordinateSystem\"] f.write(struct.pack('<i', data)) # Expected binary data:", "VMR Post-Data Header # --------------------------------------------------------------------- if header[\"File version\"] >= 3:", "data)) data = header[\"NCols\"] f.write(struct.pack('<i', data)) # Expected binary data:", "the data set, including the assumed # left-right convention, the", "Field of view extent in row direction [mm] data, =", "2.6): For each past transformation, the # information specified in", "coordinate data, = struct.unpack('<f', f.read(4)) header[\"Slice1CenterZ\"] = data # First", "= data data, = struct.unpack('<h', f.read(2)) header[\"OffsetY\"] = data data,", "Convert axes from Nifti standard back to BV standard data_img", "of VMR files is \"4\", which is only slightly different", "NOTE(Developer Guide 2.6): The first four entries of the post-data", "order to represent large data sets efficiently, e.g. 
in #", "data = header[\"VoxelSizeY\"] f.write(struct.pack('<f', data)) data = header[\"VoxelSizeZ\"] f.write(struct.pack('<f', data))", "matrix # Expected binary data: float (4 bytes) data, =", "VMR file format.\"\"\" import struct import numpy as np from", "for spatial transformation and # coregistration routines as well as", "# BV (Z left -> right) [axis 0 after np.reshape]", "3: # NOTE(Developer Guide 2.6): These four entries have been", "of view extent in column dir. [mm] data, = struct.unpack('<f',", "information # in the post-data header contains position information (if", "data # First slice center Y coordinate data, = struct.unpack('<f',", "# Expected binary data: float (4 bytes) data = header[\"Slice1CenterX\"]", "if the extended information is not needed. The information #", "Expected binary data: int (4 bytes) data = header[\"PastTransformation\"][i][\"NrOfValues\"] f.write(struct.pack('<i',", "f.write(struct.pack('<H', data)) # --------------------------------------------------------------------- # VMR Data # --------------------------------------------------------------------- #", "axis # Expected binary data: char (1 byte) data, =", "data # Voxel resolution along Z axis # Expected binary", "Expected binary data: variable-length string data = header[\"PastTransformation\"][i][\"Name\"] write_variable_length_string(f, data)", "needed. The information # in the post-data header contains position", "The data is organized in three loops: # DimZ #", "data_img = np.reshape( data_img, (header[\"DimZ\"], header[\"DimY\"], header[\"DimX\"])) data_img = np.transpose(data_img,", "data = header[\"OffsetZ\"] f.write(struct.pack('<h', data)) data = header[\"FramingCubeDim\"] f.write(struct.pack('<h', data))", "VMR (e.g. ACPC transformation). Finally, additional # information further descries", "data # Slice column direction vector Z component # Expected", "matrix) # \"4\": Talairach transformation # \"5\": Un-Talairach transformation (1", ": string Path to file. Returns ------- header : dictionary", "the post-data # header are new since file version \"3\"", "# Voxel resolution along Y axis data, = struct.unpack('<f', f.read(4))", "numpy.array Image data. \"\"\" header = dict() with open(filename, 'rb')", "if header[\"NrOfPastSpatialTransformations\"] != 0: # NOTE(Developer Guide 2.6): For each", "in # terms of BrainVoyager's internal conventions but follow the", "Field of view extent in column dir. [mm] data, =", "header[\"ColDirX\"] = data # Slice column direction vector X component", "# ============================================================================= def write_vmr(filename, header, data_img): \"\"\"Protocol to write Brainvoyager", "if header[\"NrOfPastSpatialTransformations\"] != 0: for i in range(header[\"NrOfPastSpatialTransformations\"]): # Expected", "and higher contain # additional header information after the actual", "# DICOM files. The coordinate axes labels in these entries", "header[\"SliceNCenterZ\"] = data # Last slice center Z coordinate data,", "sets with minimal header # checking if the extended information", "struct.unpack('<f', f.read(4)) header[\"ColDirZ\"] = data # Slice column direction vector", "2.6): Each data element (intensity value) is # represented in", "data, = struct.unpack('<i', f.read(4)) header[\"VMROrigV16MinValue\"] = data # 16-bit data", "open(filename, 'wb') as f: # --------------------------------------------------------------------- # VMR Pre-Data Header", "not needed. 
The information # in the post-data header contains", "= data # Slice column direction vector X component data,", "0 after np.reshape] = X in Tal space # Expected" ]
[ "[.77, .78] for (m, g) in zip(models, accs): acc =", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "+ .1 def test_imagenet1k_inception_bn(**kwargs): acc = mx.metric.create('acc') m = 'imagenet1k-inception-bn'", "OF ANY # KIND, either express or implied. See the", "'http://data.mxnet.io/data/val-5k-256.rec', VAL_DATA) def test_imagenet1k_resnet(**kwargs): models = ['imagenet1k-resnet-50', 'imagenet1k-resnet-152'] accs =", "test_imagenet1k_inception_bn(**kwargs): acc = mx.metric.create('acc') m = 'imagenet1k-inception-bn' g = 0.75", "Software Foundation (ASF) under one # or more contributor license", "more contributor license agreements. See the NOTICE file # distributed", "Unless required by applicable law or agreed to in writing,", "print_function import mxnet as mx from common import find_mxnet, modelzoo", "assert len(gpus) > 0 batch_size = 16 * len(gpus) gpus", "an # \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF", "# regarding copyright ownership. The ASF licenses this file #", "speed = %f img/sec' % (m, r, speed)) assert r", "Apache Software Foundation (ASF) under one # or more contributor", "mx from common import find_mxnet, modelzoo from score import score", "in compliance # with the License. You may obtain a", "# to you under the Apache License, Version 2.0 (the", "WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express", "License for the # specific language governing permissions and limitations", "WARRANTIES OR CONDITIONS OF ANY # KIND, either express or", "acc = %f, speed = %f img/sec' % (m, r,", "with this work for additional information # regarding copyright ownership.", "governing permissions and limitations # under the License. \"\"\" test", "__future__ import print_function import mxnet as mx from common import", "= mx.metric.create('acc') m = 'imagenet1k-inception-bn' g = 0.75 (speed,) =", "(ASF) under one # or more contributor license agreements. See", "2.0 (the # \"License\"); you may not use this file", "OR CONDITIONS OF ANY # KIND, either express or implied.", "mxnet as mx from common import find_mxnet, modelzoo from score", ".1 def test_imagenet1k_inception_bn(**kwargs): acc = mx.metric.create('acc') m = 'imagenet1k-inception-bn' g", "< g + .1 def test_imagenet1k_inception_bn(**kwargs): acc = mx.metric.create('acc') m", "and r < g + .1 if __name__ == '__main__':", "(m, g) in zip(models, accs): acc = mx.metric.create('acc') (speed,) =", "# or more contributor license agreements. See the NOTICE file", "assert r > g and r < g + .1", "agreed to in writing, # software distributed under the License", "r > g and r < g + .1 def", "# under the License. \"\"\" test pretrained models \"\"\" from", "r, speed)) assert r > g and r < g", "= score(model=m, data_val=VAL_DATA, rgb_mean='0,0,0', metrics=acc, **kwargs) r = acc.get()[1] print('Tested", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "work for additional information # regarding copyright ownership. 
The ASF", "> g and r < g + .1 def test_imagenet1k_inception_bn(**kwargs):", "specific language governing permissions and limitations # under the License.", "16 * len(gpus) gpus = ','.join([str(i) for i in gpus])", "under the License is distributed on an # \"AS IS\"", "this file # to you under the Apache License, Version", "gpus = mx.test_utils.list_gpus() assert len(gpus) > 0 batch_size = 16", "distributed under the License is distributed on an # \"AS", "models = ['imagenet1k-resnet-50', 'imagenet1k-resnet-152'] accs = [.77, .78] for (m,", "BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either", "g and r < g + .1 def test_imagenet1k_inception_bn(**kwargs): acc", "copyright ownership. The ASF licenses this file # to you", "# software distributed under the License is distributed on an", "import print_function import mxnet as mx from common import find_mxnet,", "0.75 (speed,) = score(model=m, data_val=VAL_DATA, rgb_mean='123.68,116.779,103.939', metrics=acc, **kwargs) r =", "\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY #", "limitations # under the License. \"\"\" test pretrained models \"\"\"", "test pretrained models \"\"\" from __future__ import print_function import mxnet", "== '__main__': gpus = mx.test_utils.list_gpus() assert len(gpus) > 0 batch_size", "(the # \"License\"); you may not use this file except", "the License. You may obtain a copy of the License", "rgb_mean='0,0,0', metrics=acc, **kwargs) r = acc.get()[1] print('Tested %s, acc =", "in writing, # software distributed under the License is distributed", "len(gpus) > 0 batch_size = 16 * len(gpus) gpus =", "batch_size = 16 * len(gpus) gpus = ','.join([str(i) for i", "score import score VAL_DATA='data/val-5k-256.rec' def download_data(): return mx.test_utils.download( 'http://data.mxnet.io/data/val-5k-256.rec', VAL_DATA)", "distributed with this work for additional information # regarding copyright", "< g + .1 if __name__ == '__main__': gpus =", "modelzoo from score import score VAL_DATA='data/val-5k-256.rec' def download_data(): return mx.test_utils.download(", "import mxnet as mx from common import find_mxnet, modelzoo from", "r = acc.get()[1] print('Tested %s, acc = %f, speed =", "(speed,) = score(model=m, data_val=VAL_DATA, rgb_mean='0,0,0', metrics=acc, **kwargs) r = acc.get()[1]", "def test_imagenet1k_inception_bn(**kwargs): acc = mx.metric.create('acc') m = 'imagenet1k-inception-bn' g =", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "acc.get()[1] print('Tested %s, acc = %f, speed = %f img/sec'", "License is distributed on an # \"AS IS\" BASIS, WITHOUT", "ASF licenses this file # to you under the Apache", "under the Apache License, Version 2.0 (the # \"License\"); you", "for the # specific language governing permissions and limitations #", "= mx.metric.create('acc') (speed,) = score(model=m, data_val=VAL_DATA, rgb_mean='0,0,0', metrics=acc, **kwargs) r", "r < g + .1 def test_imagenet1k_inception_bn(**kwargs): acc = mx.metric.create('acc')", "import score VAL_DATA='data/val-5k-256.rec' def download_data(): return mx.test_utils.download( 'http://data.mxnet.io/data/val-5k-256.rec', VAL_DATA) def", "**kwargs) r = acc.get()[1] print('Tested %s, acc = %f, speed", "%f, speed = %f img/sec' % (m, r, speed)) assert", "distributed on an # \"AS IS\" BASIS, WITHOUT WARRANTIES OR", "on an # \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS", "regarding copyright ownership. 
The ASF licenses this file # to", "See the License for the # specific language governing permissions", "to in writing, # software distributed under the License is", "or agreed to in writing, # software distributed under the", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "ownership. The ASF licenses this file # to you under", "g = 0.75 (speed,) = score(model=m, data_val=VAL_DATA, rgb_mean='123.68,116.779,103.939', metrics=acc, **kwargs)", "metrics=acc, **kwargs) r = acc.get()[1] print('Tested %s, acc = %f,", "% (m, r, speed)) assert r > g and r", "score VAL_DATA='data/val-5k-256.rec' def download_data(): return mx.test_utils.download( 'http://data.mxnet.io/data/val-5k-256.rec', VAL_DATA) def test_imagenet1k_resnet(**kwargs):", "import find_mxnet, modelzoo from score import score VAL_DATA='data/val-5k-256.rec' def download_data():", "mx.metric.create('acc') m = 'imagenet1k-inception-bn' g = 0.75 (speed,) = score(model=m,", "accs): acc = mx.metric.create('acc') (speed,) = score(model=m, data_val=VAL_DATA, rgb_mean='0,0,0', metrics=acc,", "m = 'imagenet1k-inception-bn' g = 0.75 (speed,) = score(model=m, data_val=VAL_DATA,", "= [.77, .78] for (m, g) in zip(models, accs): acc", "# \"License\"); you may not use this file except in", "zip(models, accs): acc = mx.metric.create('acc') (speed,) = score(model=m, data_val=VAL_DATA, rgb_mean='0,0,0',", "# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY", "common import find_mxnet, modelzoo from score import score VAL_DATA='data/val-5k-256.rec' def", "to the Apache Software Foundation (ASF) under one # or", "r < g + .1 if __name__ == '__main__': gpus", "and limitations # under the License. \"\"\" test pretrained models", "\"License\"); you may not use this file except in compliance", "file # distributed with this work for additional information #", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "> 0 batch_size = 16 * len(gpus) gpus = ','.join([str(i)", "with the License. You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "(m, r, speed)) assert r > g and r <", "\"\"\" from __future__ import print_function import mxnet as mx from", "or more contributor license agreements. See the NOTICE file #", "applicable law or agreed to in writing, # software distributed", "# distributed with this work for additional information # regarding", "this work for additional information # regarding copyright ownership. The", "VAL_DATA='data/val-5k-256.rec' def download_data(): return mx.test_utils.download( 'http://data.mxnet.io/data/val-5k-256.rec', VAL_DATA) def test_imagenet1k_resnet(**kwargs): models", "License. \"\"\" test pretrained models \"\"\" from __future__ import print_function", "= 0.75 (speed,) = score(model=m, data_val=VAL_DATA, rgb_mean='123.68,116.779,103.939', metrics=acc, **kwargs) r", "rgb_mean='123.68,116.779,103.939', metrics=acc, **kwargs) r = acc.get()[1] print('Tested %s acc =", "writing, # software distributed under the License is distributed on", "= score(model=m, data_val=VAL_DATA, rgb_mean='123.68,116.779,103.939', metrics=acc, **kwargs) r = acc.get()[1] print('Tested", "the NOTICE file # distributed with this work for additional", "','.join([str(i) for i in gpus]) kwargs = {'gpus':gpus, 'batch_size':batch_size, 'max_num_examples':500}", "is distributed on an # \"AS IS\" BASIS, WITHOUT WARRANTIES", "implied. 
See the License for the # specific language governing", "file # to you under the Apache License, Version 2.0", "speed)) assert r > g and r < g +", "to you under the Apache License, Version 2.0 (the #", "i in gpus]) kwargs = {'gpus':gpus, 'batch_size':batch_size, 'max_num_examples':500} download_data() test_imagenet1k_resnet(**kwargs)", "CONDITIONS OF ANY # KIND, either express or implied. See", "in gpus]) kwargs = {'gpus':gpus, 'batch_size':batch_size, 'max_num_examples':500} download_data() test_imagenet1k_resnet(**kwargs) test_imagenet1k_inception_bn(**kwargs)", "# with the License. You may obtain a copy of", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "'__main__': gpus = mx.test_utils.list_gpus() assert len(gpus) > 0 batch_size =", "test_imagenet1k_resnet(**kwargs): models = ['imagenet1k-resnet-50', 'imagenet1k-resnet-152'] accs = [.77, .78] for", "__name__ == '__main__': gpus = mx.test_utils.list_gpus() assert len(gpus) > 0", "from __future__ import print_function import mxnet as mx from common", "+ .1 if __name__ == '__main__': gpus = mx.test_utils.list_gpus() assert", "may not use this file except in compliance # with", "g + .1 if __name__ == '__main__': gpus = mx.test_utils.list_gpus()", "in zip(models, accs): acc = mx.metric.create('acc') (speed,) = score(model=m, data_val=VAL_DATA,", "if __name__ == '__main__': gpus = mx.test_utils.list_gpus() assert len(gpus) >", "software distributed under the License is distributed on an #", "Licensed to the Apache Software Foundation (ASF) under one #", "the License. \"\"\" test pretrained models \"\"\" from __future__ import", "for additional information # regarding copyright ownership. The ASF licenses", "len(gpus) gpus = ','.join([str(i) for i in gpus]) kwargs =", "the Apache Software Foundation (ASF) under one # or more", "g) in zip(models, accs): acc = mx.metric.create('acc') (speed,) = score(model=m,", "= mx.test_utils.list_gpus() assert len(gpus) > 0 batch_size = 16 *", "# # Unless required by applicable law or agreed to", "Version 2.0 (the # \"License\"); you may not use this", "score(model=m, data_val=VAL_DATA, rgb_mean='123.68,116.779,103.939', metrics=acc, **kwargs) r = acc.get()[1] print('Tested %s", "under one # or more contributor license agreements. See the", "r > g and r < g + .1 if", "one # or more contributor license agreements. See the NOTICE", "License, Version 2.0 (the # \"License\"); you may not use", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "= ['imagenet1k-resnet-50', 'imagenet1k-resnet-152'] accs = [.77, .78] for (m, g)", "either express or implied. See the License for the #", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "= acc.get()[1] print('Tested %s, acc = %f, speed = %f", "print('Tested %s, acc = %f, speed = %f img/sec' %", "KIND, either express or implied. See the License for the", "from score import score VAL_DATA='data/val-5k-256.rec' def download_data(): return mx.test_utils.download( 'http://data.mxnet.io/data/val-5k-256.rec',", "information # regarding copyright ownership. 
The ASF licenses this file", "for (m, g) in zip(models, accs): acc = mx.metric.create('acc') (speed,)", "= %f img/sec' % (m, r, speed)) assert r >", "g + .1 def test_imagenet1k_inception_bn(**kwargs): acc = mx.metric.create('acc') m =", "pretrained models \"\"\" from __future__ import print_function import mxnet as", "mx.metric.create('acc') (speed,) = score(model=m, data_val=VAL_DATA, rgb_mean='0,0,0', metrics=acc, **kwargs) r =", "the Apache License, Version 2.0 (the # \"License\"); you may", "* len(gpus) gpus = ','.join([str(i) for i in gpus]) kwargs", "for i in gpus]) kwargs = {'gpus':gpus, 'batch_size':batch_size, 'max_num_examples':500} download_data()", "permissions and limitations # under the License. \"\"\" test pretrained", "mx.test_utils.download( 'http://data.mxnet.io/data/val-5k-256.rec', VAL_DATA) def test_imagenet1k_resnet(**kwargs): models = ['imagenet1k-resnet-50', 'imagenet1k-resnet-152'] accs", "acc = mx.metric.create('acc') (speed,) = score(model=m, data_val=VAL_DATA, rgb_mean='0,0,0', metrics=acc, **kwargs)", "return mx.test_utils.download( 'http://data.mxnet.io/data/val-5k-256.rec', VAL_DATA) def test_imagenet1k_resnet(**kwargs): models = ['imagenet1k-resnet-50', 'imagenet1k-resnet-152']", "except in compliance # with the License. You may obtain", "additional information # regarding copyright ownership. The ASF licenses this", "score(model=m, data_val=VAL_DATA, rgb_mean='0,0,0', metrics=acc, **kwargs) r = acc.get()[1] print('Tested %s,", "= acc.get()[1] print('Tested %s acc = %f, speed = %f", "from common import find_mxnet, modelzoo from score import score VAL_DATA='data/val-5k-256.rec'", "print('Tested %s acc = %f, speed = %f img/sec' %", "you under the Apache License, Version 2.0 (the # \"License\");", "'imagenet1k-inception-bn' g = 0.75 (speed,) = score(model=m, data_val=VAL_DATA, rgb_mean='123.68,116.779,103.939', metrics=acc,", "under the License. \"\"\" test pretrained models \"\"\" from __future__", "def download_data(): return mx.test_utils.download( 'http://data.mxnet.io/data/val-5k-256.rec', VAL_DATA) def test_imagenet1k_resnet(**kwargs): models =", "or implied. See the License for the # specific language", "(speed,) = score(model=m, data_val=VAL_DATA, rgb_mean='123.68,116.779,103.939', metrics=acc, **kwargs) r = acc.get()[1]", "See the NOTICE file # distributed with this work for", "# KIND, either express or implied. See the License for", "express or implied. See the License for the # specific", "['imagenet1k-resnet-50', 'imagenet1k-resnet-152'] accs = [.77, .78] for (m, g) in", "r = acc.get()[1] print('Tested %s acc = %f, speed =", "NOTICE file # distributed with this work for additional information", "> g and r < g + .1 if __name__", "download_data(): return mx.test_utils.download( 'http://data.mxnet.io/data/val-5k-256.rec', VAL_DATA) def test_imagenet1k_resnet(**kwargs): models = ['imagenet1k-resnet-50',", "= %f, speed = %f img/sec' % (m, r, speed))", ".78] for (m, g) in zip(models, accs): acc = mx.metric.create('acc')", "this file except in compliance # with the License. You", "g and r < g + .1 if __name__ ==", "agreements. 
See the NOTICE file # distributed with this work", "Apache License, Version 2.0 (the # \"License\"); you may not", "'imagenet1k-resnet-152'] accs = [.77, .78] for (m, g) in zip(models,", "the # specific language governing permissions and limitations # under", "licenses this file # to you under the Apache License,", "VAL_DATA) def test_imagenet1k_resnet(**kwargs): models = ['imagenet1k-resnet-50', 'imagenet1k-resnet-152'] accs = [.77,", "license agreements. See the NOTICE file # distributed with this", "and r < g + .1 def test_imagenet1k_inception_bn(**kwargs): acc =", "data_val=VAL_DATA, rgb_mean='123.68,116.779,103.939', metrics=acc, **kwargs) r = acc.get()[1] print('Tested %s acc", "acc.get()[1] print('Tested %s acc = %f, speed = %f img/sec'", "required by applicable law or agreed to in writing, #", "by applicable law or agreed to in writing, # software", "may obtain a copy of the License at # #", "# Unless required by applicable law or agreed to in", "= 16 * len(gpus) gpus = ','.join([str(i) for i in", "models \"\"\" from __future__ import print_function import mxnet as mx", "mx.test_utils.list_gpus() assert len(gpus) > 0 batch_size = 16 * len(gpus)", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "img/sec' % (m, r, speed)) assert r > g and", "The ASF licenses this file # to you under the", "as mx from common import find_mxnet, modelzoo from score import", "gpus = ','.join([str(i) for i in gpus]) kwargs = {'gpus':gpus,", "file except in compliance # with the License. You may", "IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND,", "# specific language governing permissions and limitations # under the", "0 batch_size = 16 * len(gpus) gpus = ','.join([str(i) for", "the License for the # specific language governing permissions and", "License. You may obtain a copy of the License at", "You may obtain a copy of the License at #", "ANY # KIND, either express or implied. See the License", ".1 if __name__ == '__main__': gpus = mx.test_utils.list_gpus() assert len(gpus)", "# Licensed to the Apache Software Foundation (ASF) under one", "the License is distributed on an # \"AS IS\" BASIS,", "language governing permissions and limitations # under the License. \"\"\"", "you may not use this file except in compliance #", "\"\"\" test pretrained models \"\"\" from __future__ import print_function import", "%s acc = %f, speed = %f img/sec' % (m,", "= 'imagenet1k-inception-bn' g = 0.75 (speed,) = score(model=m, data_val=VAL_DATA, rgb_mean='123.68,116.779,103.939',", "metrics=acc, **kwargs) r = acc.get()[1] print('Tested %s acc = %f,", "accs = [.77, .78] for (m, g) in zip(models, accs):", "find_mxnet, modelzoo from score import score VAL_DATA='data/val-5k-256.rec' def download_data(): return", "use this file except in compliance # with the License.", "compliance # with the License. You may obtain a copy", "%f img/sec' % (m, r, speed)) assert r > g", "def test_imagenet1k_resnet(**kwargs): models = ['imagenet1k-resnet-50', 'imagenet1k-resnet-152'] accs = [.77, .78]", "law or agreed to in writing, # software distributed under", "%s, acc = %f, speed = %f img/sec' % (m,", "contributor license agreements. 
See the NOTICE file # distributed with", "data_val=VAL_DATA, rgb_mean='0,0,0', metrics=acc, **kwargs) r = acc.get()[1] print('Tested %s, acc", "acc = mx.metric.create('acc') m = 'imagenet1k-inception-bn' g = 0.75 (speed,)", "**kwargs) r = acc.get()[1] print('Tested %s acc = %f, speed", "= ','.join([str(i) for i in gpus]) kwargs = {'gpus':gpus, 'batch_size':batch_size,", "Foundation (ASF) under one # or more contributor license agreements.", "not use this file except in compliance # with the" ]
[ "parent vDataFrame is modified. Attributes ---------- alias, str : vColumn", "\"DECODE({}, '{}', 1, 0)\".format( \"{}\", str(distinct_elements[k]).replace(\"'\", \"''\") ) transformations =", "if (nullifzero) else \"\", cmax, cmin, ), \"float\", \"float\", )", "One-Hot Encoding. \"\"\" check_types([(\"response\", response, [str])]) self.parent.are_namecols_in(response) response = self.parent.format_colnames(response)", "between the max and the min sem : standard error", "of its affiliates. # Licensed under the Apache License, Version", "in (\"float\", \"int\"), TypeError( \"The column 'numcol' must be numerical\"", "OVER (PARTITION BY {})\".format( self.alias, \", \".join(by) ), ) else:", "= \"(\", \")\" else: lp, rp = \"\", \"\" for", "to the input vColumn. \"\"\" check_types( [ (\"breaks\", breaks, [list]),", "---# def density( self, by: str = \"\", bandwidth: float", "if the vColumn is boolean. See Also -------- vDataFrame[].isdate :", "vColumn database type. \"\"\" return self.transformations[-1][2] # ---# def clip(self,", "the slicing. step: int Size of the slicing. Returns -------", "{} AS verticapy_agg FROM {} WHERE {} IS NOT NULL", "(Standard Deviation). Returns ------- float std See Also -------- vDataFrame.aggregate", "this issue.\" ) self.add_copy(new_name) parent = self.drop(add_history=False) parent.__add_to_history__( \"[Rename]: The", "to True, the approximate quantile is returned. By setting this", "self.aggregate(func=[prefix + \"{}%\".format(x * 100)]).values[self.alias][ 0 ] # ---# def", "\"exp\", \"floor\", \"ln\", \"log\", \"log10\", \"mod\", \"pow\", \"round\", \"sign\", \"sin\",", "h = round(h, 4) elif h > 0.000001: h =", "return parent # ---# def round(self, n: int): \"\"\" ---------------------------------------------------------------------------", "[0, 1], TypeError( \"vColumn {} must be binary to use", "in range(max_floor): self.transformations += [(\"{}\", self.ctype(), self.category())] self.transformations += [(func,", "[(\"{}\", self.ctype(), self.category())] self.transformations += [ (\"AVG({}) OVER (PARTITION BY", "title=\"Different aggregations to compute the optimal h.\", method=\"fetchrow\", ) count,", "object See Also -------- vDataFrame.donut : Draws the donut chart", "cotangent exp : exponential function floor : value down to", "def abs(self): \"\"\" --------------------------------------------------------------------------- Applies the absolute value function to", "of nbins will be computed. h: float, optional Interval width", "slower if the vDataFrame has been transformed multiple times, so", "LIMIT 1\".format( avg, stddev, self.parent.__genSQL__() ), print_time_sql=False, ) except: avg,", "result[i - 1], result[i] ) trans += \" ELSE NULL", "input vColumn. \"\"\" check_types([(\"n\", n, [int, float])]) return self.apply(func=\"ROUND({}, {})\".format(\"{}\",", "value_counts(self, k: int = 30): \"\"\" --------------------------------------------------------------------------- Returns the k", "The expression generated will look like: even: CASE ... 
WHEN", "str): \"\"\" --------------------------------------------------------------------------- Renames the vColumn by dropping the current", "\"\"\" --------------------------------------------------------------------------- Replaces the regular expression matches in each of", "if not xlim: xmin = self.min() xmax = self.max() else:", "\"floor\", \"ln\", \"log\", \"log10\", \"mod\", \"pow\", \"round\", \"sign\", \"sin\", \"sinh\",", "std robust_zscore : Normalization using the Robust Z-Score (median and", "Applies the absolute value function to the input vColumn. Returns", "or not (isinstance(cat_priority, Iterable)): cat_priority = [cat_priority] check_types( [ (\"by\",", "- alpha], [self.alias]) .transpose() .values[self.alias] ) self.parent.filter( \"({} BETWEEN {}", "from the vDataFrame. Dropping a vColumn means simply not selecting", "final_transformation = [ ( \"({} - {}) / {}({} -", "FROM {}{} GROUP BY {} ORDER BY _verticapy_cnt_ DESC {}\".format(", "0: index_start += self.parent.shape()[0] if isinstance(index_stop, int): if index_stop <", "of the vColumn records by an input value. vDataFrame[].str_slice :", "1] ... END odd : CASE ... WHEN vColumn =", "---# def boxplot( self, by: str = \"\", h: float", "vDataFrame self.parent See Also -------- vDataFrame.astype : Converts the vColumns", "+= [\"\"] result.values[\"woe\"] += [\"\"] result.values[\"iv\"] += [sum(result[\"iv\"])] return result", "name: str Name of the copy. Returns ------- vDataFrame self.parent", "set to True, the approximate quantile is returned. By setting", "and method == \"smart\": schema = verticapy.options[\"temp_schema\"] if not (schema):", "query = \"SELECT {} AS {} FROM {} WHERE {}", "[order_by] check_types( [ ( \"method\", method, [ \"auto\", \"mode\", \"0ifnull\",", "total > 1 else \" was \" if verticapy.options[\"print_info\"]: print(\"{}", "if not (isinstance(index_start, int)): index_start = 0 if index_start <", "information. Parameters ---------- k: int, optional Number of most occurent", "ctype, category)] self.parent[copy_name].catalog = self.catalog self.parent.__add_to_history__( \"[Apply]: The vColumn '{}'", "start_date: Union[str, datetime.datetime, datetime.date] = \"\", end_date: Union[str, datetime.datetime, datetime.date]", "stddev = std # ---# def store_usage(self): \"\"\" --------------------------------------------------------------------------- Returns", "result of using previously the method on the vColumn \"", "[], ax=None, **style_kwds, ): \"\"\" --------------------------------------------------------------------------- Draws the box plot", "+= [name] conj = \"s were \" if len(all_new_features) >", "License for the specific language governing permissions and # limitations", "vColumn categories. Parameters ---------- response: str Response vColumn. Returns -------", "using a user-defined encoding. Parameters ---------- argv: object Any amount", "dtype ) ) return self.parent except Exception as e: raise", "more information, see utilities.tablesample. See Also -------- vDataFrame[].nsmallest : Returns", "fd else: best_h = max(sturges, fd) self.parent.__update_catalog__({\"index\": [\"numh\"], self.alias: [best_h]})", "{} < {} THEN {} WHEN {} > {} THEN", "result[\"std\"][0], threshold ) ) else: p_alpha, p_1_alpha = ( self.parent.quantile([alpha,", "vColumn {}\\n{} feature{}created: {}\".format( self.alias, len(all_new_features), conj, \", \".join(all_new_features) )", "from verticapy.errors import * ## # # __ __ ______", "'avg' (Average). 
Returns ------- float average See Also -------- vDataFrame.aggregate", "= \"SELECT {}, ord, non_events, events, non_events / NULLIFZERO(SUM(non_events) OVER", "is date, False otherwise. Returns ------- bool True if the", "\"{}%\".format(x * 100)]).values[self.alias][ 0 ] # ---# def range_plot( self,", "[name] self.parent.__add_to_history__( \"[Add Copy]: A copy of the vColumn {}", "set to 'smart'. A RF Regressor will be trained if", "argv: object Any amount of expressions. The expression generated will", "compute the optimal h. auto : Combination of Freedman Diaconis", "return ax kernel = kernel.lower() from verticapy.learn.neighbors import KernelDensity schema", "will be transformed. Parameters ---------- start: int Start of the", "self.parent[by].distinct() for idx, column in enumerate(columns): param = {\"color\": colors[idx", "records. vDataFrame[].str_count : Computes the number of matches for the", "can make the vDataFrame \"heavier\" if it is used to", "Matplotlib axes object See Also -------- vDataFrame[].hist : Draws the", "|| '', COUNT(*) FROM vdf_table GROUP BY {0} ORDER BY", "float = 0.05, ): \"\"\" --------------------------------------------------------------------------- Fills the vColumns outliers", "the copy. Returns ------- vDataFrame self.parent See Also -------- vDataFrame.eval", "\\_\\\\\"\\_\\ # \\/_/ \\/_____/ \\/_____/ \\/_____/ \\/_____/ \\/_/ \\/_/ \\/_/", "vColumn. vDataFrame[].extract : Extracts the regular expression in each record", "# (c) Copyright [2018-2022] Micro Focus or one of its", "value. winsorize : Clips the vColumn using as lower bound", "8, cat_priority: list = [], ax=None, **style_kwds, ): \"\"\" ---------------------------------------------------------------------------", "\"same_width\", \"same_freq\", \"topk\"], ), (\"return_enum_trans\", return_enum_trans, [bool]), ] ) method", "LIMIT 1\".format( self.alias, self.alias, self.parent.__genSQL__(), where, self.alias, n ), title=\"Computing", "self.alias ), method=\"fetchall\", ) ] if mean_alpha == None: mean_alpha", "\"categorical\"): result = self.aggregate([\"count\", \"min\", \"max\"]) index = result.values[\"index\"] result", "verticapy.plot import bar return bar(self, method, of, max_cardinality, nbins, h,", "== \"smart\" ): n = len(result) trans = \"(CASE \"", "{}, {})\".format(\"{}\", start, step)) # ---# def sub(self, x: float):", "[ (\"by\", by, [str]), (\"max_cardinality\", max_cardinality, [int, float]), (\"h\", h,", "[float(round(item[2], 3)) for item in result], } return tablesample(values) #", "\"\"\" return self.describe(method=\"categorical\", max_cardinality=k) # ---# def var(self): \"\"\" ---------------------------------------------------------------------------", "def density( self, by: str = \"\", bandwidth: float =", "if method in (\"mean\", \"median\") or isinstance(val, float): category, ctype", ") p_alpha, p_1_alpha = executeSQL( query=query, title=\"Computing the quantiles of", "quantile. For example: 0.25 represents Q1. approx: bool, optional If", "range(1, n): trans += \"WHEN {} BETWEEN {} AND {}", "Returns ------- bool True if the vColumn is numerical. See", "not in ([0, 1], [1, 0]) or self.isbool(): all_new_features =", "return self.parent.aggregate(func=func, columns=[self.alias]).transpose() agg = aggregate # ---# def apply(self,", "{2} AS percent, AVG({3}{4}) AS mean, STDDEV({3}{4}) AS std, MIN({3}{4})", "\"winsorize\", threshold: float = 4.0, use_threshold: bool = True, alpha:", "information, see utilities.tablesample. 
See Also -------- vDataFrame[].nlargest : Returns the", "not (\"figsize\" in kwargs): kwargs[\"figsize\"] = (14, 10) return self.parent[columns].to_geopandas(self.alias).plot(*args,", "\"\"\" check_types([(\"dtype\", dtype, [str])]) try: query = \"SELECT {}::{} AS", ") elif method == \"zscore\": self.catalog[elem] = (sauv[elem] - sauv[\"mean\"])", "vDataFrame input aggregations. \"\"\" check_types([(\"dropna\", dropna, [bool]), (\"n\", n, [int,", "the vDataFrame input aggregations. \"\"\" return self.aggregate([\"sem\"]).values[self.alias][0] # ---# def", "== \"median\") else \"AVG\" if by == []: if fun", "\"COALESCE({}, {})\".format(\"{}\", expr) elif method == \"0ifnull\": new_column = \"DECODE({},", "ZEROIFNULL(SUM(LENGTH({}::varchar))) FROM {}\".format( bin_spatial_to_str(self.category(), self.alias), self.parent.__genSQL__(), ), title=\"Computing the Store", "of the search. See Also -------- vDataFrame.isin : Looks if", "/ {}) * {} + {}{}) || ']'\".format( \"{}\", h,", "method == \"zscore\": self.catalog[elem] = (sauv[elem] - sauv[\"mean\"]) / sauv[", ": Returns True if the vColumn is boolean. vDataFrame[].isnum :", "WHEN ({} BETWEEN {} AND {}) THEN {} ELSE NULL", "dict, optional Dictionary of the Random Forest model parameters used", "\"float\", ) ] elif method == \"robust_zscore\": if n >", "[int, float]), (\"response\", response, [str]), (\"nbins\", nbins, [int, float]), (", "if use_threshold: result = self.aggregate(func=[\"std\", \"avg\"]).transpose().values self.parent.filter( \"ABS({} - {})", "# ---# def isbool(self): \"\"\" --------------------------------------------------------------------------- Returns True if the", "method. Parameters ---------- method: str, optional The describe method. auto", "/ (1.4826 * mad) minmax : Normalization using the MinMax", "None else \"NULL\", elem[2] if elem[2] != None else \"NULL\",", "float = 1.0, kernel: str = \"gaussian\", nbins: int =", "check_types([(\"val\", val, [list])]) val = {self.alias: val} return self.parent.isin(val) #", "\"(CASE WHEN {} IN ({}) THEN {} || '' ELSE", "'', COUNT(*) FROM vdf_table GROUP BY {0} ORDER BY COUNT(*)", "[self.alias] check = True if len(args) > 0: column =", "categories of {}.\".format(self.alias), method=\"fetchall\", ) return [item for sublist in", "str): method = method.lower() check_types( [ (\"method\", method, [\"winsorize\", \"null\",", "OF ANY KIND, either express or implied. # See the", "1 then this method will return the mode of the", "the vColumn. vDataFrame[].extract : Extracts the regular expression in each", "in result if elem[1] != None ] ), ) cmax", "Returns ------- ax Matplotlib axes object \"\"\" columns = [self.alias]", "date. See Also -------- vDataFrame[].isbool : Returns True if the", "records are in the vDataFrame. \"\"\" if isinstance(val, str) or", "= self.distinct() expr = [\"DECODE({}\"] text_info = \"\\n\" for k", "the vColumn by the input element. 
Parameters ---------- x: float", "\"SELECT split_value FROM (SELECT split_value, MAX(weighted_information_gain) FROM ({}) VERTICAPY_SUBTABLE WHERE", "= False, ): \"\"\" --------------------------------------------------------------------------- Discretizes the vColumn using the", "verticapy.options[\"print_info\"]: print(\"{} element{}filled.\".format(total, conj)) self.parent.__add_to_history__( \"[Fillna]: {} {} missing value{}", "---# def category(self): \"\"\" --------------------------------------------------------------------------- Returns the category of the", "== \"zscore\": self.catalog[\"mean\"] = 0 self.catalog[\"std\"] = 1 elif method", "str(func) check_types([(\"func\", func, [str]), (\"copy_name\", copy_name, [str])]) try: try: ctype", "\" if len(all_new_features) > 1 else \" was \" self.parent.__add_to_history__(", "kurtosis # ---# def label_encode(self): \"\"\" --------------------------------------------------------------------------- Encodes the vColumn", ": trigonometric sine sinh : hyperbolic sine sqrt : arithmetic", "from verticapy.plot import pie return pie( self, method, of, max_cardinality,", "= \"\", end_date: Union[str, datetime.datetime, datetime.date] = \"\", plot_median: bool", "float], ), ( \"end_date\", end_date, [str, datetime.datetime, datetime.date, int, float],", "self.transformations = ( parent, alias, [elem for elem in transformations],", "(WOE) Table. It tells the predictive power of an independent", "= \"(SELECT DATEDIFF('second', '{}'::timestamp, {}) AS {} FROM {}) VERTICAPY_OPTIMAL_H_TABLE\".format(", "new_name, [str])]) old_name = quote_ident(self.alias) new_name = new_name.replace('\"', \"\") assert", "method.lower() self.parent.are_namecols_in([elem for elem in order_by] + by) by =", "vColumn has already the alias {new_name}.\\nBy changing the parameter 'new_name',", "--------------------------------------------------------------------------- Clips the vColumn by transforming the values lesser than", "of the vColumn. Returns ------- list Distinct caterogies of the", "columns=[self.alias]).transpose() agg = aggregate # ---# def apply(self, func: str,", "aad : average absolute deviation approx_unique : approximative cardinality count", "---------- name: str Name of the copy. Returns ------- vDataFrame", ") # ---# def std(self): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn", "return self.apply( func=\"REGEXP_SUBSTR({}, '{}')\".format(\"{}\", pat.replace(\"'\", \"''\")) ) # ---# def", "vDataFrame.isin : Looks if some specific records are in the", "return_trans: return \"({} - {}) / ({})\".format(self.alias, med, mad) else:", "zscore|minmax\" warnings.warn(warning_message, Warning) return self mad, med = self.aggregate([\"mad\", \"approx_median\"]).values[self.alias]", "\"\" if (h > 1) or (self.category() == \"float\"): trans", "aggregations during the computation. cat_stats : Computes statistics of a", "= 0 THEN 0 ELSE ZEROIFNULL(LN(pt_non_events / NULLIFZERO(pt_events))) END AS", "AS iv FROM ({}) x ORDER BY ord\".format( self.alias, query,", "copy_name: str, optional If not empty, a copy will be", ") stddev = \"DECODE({}, {}, NULL)\".format( by[0], \", \".join( [", "the vColumn with user defined Encoding. vDataFrame[].discretize : Discretizes the", "[\"\"] result.values[\"woe\"] += [\"\"] result.values[\"iv\"] += [sum(result[\"iv\"])] return result #", ": Average of the vColumn 'of'. 
min : Minimum of", "LIMIT 10\".format( self.parent.__genSQL__(force_columns=force_columns) ), print_time_sql=False, ) self.parent._VERTICAPY_VARIABLES_[\"columns\"].remove(self.alias) delattr(self.parent, self.alias) except:", "\\_\\ \\ \\ \\ \\-./\\ \\ \\ \\ \\-. \\", "result = ( self.parent.describe( method=\"numerical\", columns=[self.alias], unique=False ) .transpose() .values[self.alias]", "# ---# def isin(self, val: list, *args): \"\"\" --------------------------------------------------------------------------- Looks", "else: final_transformation = [ ( \"({} - {}) / {}({})\".format(", "= 4.0, use_threshold: bool = True, alpha: float = 0.05,", "= \"COALESCE({}, DECODE({}, {}, NULL))\".format( \"{}\", by[0], \", \".join( [\"{},", ") assert not (self.parent.is_colname_in(name)), NameError( f\"A vColumn has already the", "same_width : Computes regular width bins. smart : Uses the", "BY {})\".format( self.alias, \", \".join(by) ), ) else: avg, stddev", "(SELECT * FROM {}) (SELECT AVG({}) FROM vdf_table WHERE {}", "+ [str(count)]) ) query = \"SELECT {} FROM (SELECT {},", "= self.parent.format_colnames(numcol) assert self.parent[numcol].category() in (\"float\", \"int\"), TypeError( \"The column", "0)\".format( \"{}\", str(distinct_elements[k]).replace(\"'\", \"''\") ) transformations = self.transformations + [(expr,", "= executeSQL( \"SELECT {}, MIN({}), MAX({}) FROM {} GROUP BY", "[list]), (\"labels\", labels, [list]), (\"include_lowest\", include_lowest, [bool]), (\"right\", right, [bool]),", "modified. Attributes ---------- alias, str : vColumn alias. catalog, dict", "For more information, see utilities.tablesample. See Also -------- vDataFrame[].describe :", "---------- breaks: list List of values used to cut the", "str = \"\", max_cardinality: int = 6, nbins: int =", "x: float): \"\"\" --------------------------------------------------------------------------- Adds the input element to the", "the different transformations. \"\"\" # # Special Methods # #", "median See Also -------- vDataFrame.aggregate : Computes the vDataFrame input", "Computes statistics of a numerical column for each vColumn category.", "\"SELECT {} FROM (SELECT {}, COUNT(*) AS _verticapy_cnt_ FROM {}{}GROUP", "ax Matplotlib axes object \"\"\" columns = [self.alias] check =", "of the slicing instead of the ceiling. Returns ------- vDataFrame", "records. For example, to check if Badr and Fouad are", "not use this file except in compliance with the License.", "\"cramer\": {}, \"biserial\": {}, \"regr_avgx\": {}, \"regr_avgy\": {}, \"regr_count\": {},", "self, method: str = \"winsorize\", threshold: float = 4.0, use_threshold:", "k of the 'topk' method. new_category: str, optional The name", "int(nbins))) assert nb != 0, Exception( \"Not enough values to", "Extracts a specific TS field from the vColumn (only if", "specific records are in the vDataFrame. \"\"\" if isinstance(val, str)", "self.alias, where, ) result = executeSQL( query=query, title=\"Computing the equal", "(\"float\", \"int\") # ---# def iv_woe(self, y: str, nbins: int", "'{}'.\".format( self.alias, method ) ) else: raise TypeError(\"The vColumn must", "display. offset: int, optional Number of elements to skip. 
Aggregation methods. Aggregations are computed in-database and cached in the vColumn catalog so repeated calls do not re-run the query.

aggregate(func) : Aggregates the vColumn using the input list of functions. Recognized names include aad (average absolute deviation), approx_unique (approximative cardinality), count (number of non-missing elements), cvar (conditional value at risk), kurtosis, mad (median absolute deviation), max, median, min, mode (most occurent element), percent, prod, range (difference between the max and the min), sem (standard error of the mean), skewness, std (standard deviation), sum, topk (kth most occurent element), topk_percent, unique (cardinality, count distinct) and var (variance). Other aggregations can work if they are part of the database version you are using, and a customized aggregation (ex: AVG(column1) + 5) is also accepted. The result is returned as a tablesample (for more information, see utilities.tablesample).

Dedicated shortcuts wrap the same machinery: aad(), avg(), count(), kurtosis() (alias kurt), mad(), max(), median(approx=True), min(), mode(dropna, n), nunique(approx=True), product() (alias prod), quantile(x, approx=True) where x is a float between 0 and 1 that represents the quantile, sem(), skewness() (alias skew), std() (alias stddev), sum(), var(), topk(k, dropna), value_counts(k), nlargest(n) and nsmallest(n). For median, quantile and nunique, the approx parameter selects the approximate computation; setting it to False can drastically decrease performance. topk returns the k most occurent elements, how often they occur and their percentage of the distribution.
Missing values and outliers.

fillna(val, method, expr, by, order_by) : Fills the missing values with a constant, an SQL expression, or a method among auto, mode, 0ifnull, mean, median, ffill/pad and bfill/backfill. The interpolation methods require order_by (the vColumns used to sort the data), by lists the vColumns used in the partition, and when the mode is used on a vColumn that has no mode (only missing values) a warning is raised and nothing is filled. The number of filled elements is printed when print_info is enabled and the operation is saved in the vDataFrame history ([Fillna]).

dropna() : Filters out the records of the vDataFrame where the vColumn is missing.

clip(lower, upper) : Clips the vColumn by transforming the values lesser than the lower bound to the lower bound, and the values greater than the upper bound to the upper bound (each bound, when given, must have a numerical value).

fill_outliers(method, threshold=4.0, use_threshold=True, alpha=0.05) and drop_outliers(threshold=4.0, use_threshold=True, alpha=0.05) : Handle the vColumn outliers. With use_threshold, the Gaussian distribution is used to identify outliers: after normalizing the data (Z-Score), any record whose absolute value is greater than the threshold is considered an outlier; otherwise the alpha / 1 - alpha quantiles delimit them. For fill_outliers, winsorize clips the outliers to those bounds, null replaces them with NULL, and mean replaces the upper and lower outliers by their respective averages.

isin(val) : Looks if some specific records are in the vDataFrame; for example, to check if Badr and Fouad are in the vDataFrame, write the following list: ["Fouad", "Badr"].

normalize(method, by, return_trans) : Normalizes the vColumn using zscore ((x - avg) / std), robust_zscore ((x - median) / (1.4826 * mad)) or minmax ((x - min) / (max - min)). by partitions the computation and return_trans returns the transformation instead of applying it to the parent vDataFrame (used for testing purpose). A warning is raised and nothing is done when the standard deviation, the MAD or max - min is null, and only numerical vColumns can be normalized.
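An imputation and outlier sketch built on the signatures above (column names illustrative):

    # vdf is the vDataFrame from the earlier sketch (load_titanic()).
    vdf["age"].fillna(method="mean")                  # fill NAs with the average
    vdf["fare"].fill_outliers(method="winsorize",     # clip records whose Z-score
                              threshold=4.0)          # exceeds 4
    vdf["fare"].normalize(method="robust_zscore")     # (x - median) / (1.4826 * mad)
    vdf["boat"].dropna()                              # drop rows where boat is missing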
Encoding and discretization.

cut(breaks, labels, include_lowest, right) : Cuts the vColumn using the input list of breaks (only for numerical or date-like vColumns). breaks must contain at least two elements and its length must be equal to the length of labels + 1, or labels must be empty, in which case interval labels such as [a;b] are generated. include_lowest closes the lowest interval and right controls whether the intervals are closed on the right.

discretize(method, h, nbins, k, new_category, RFmodel_params, response, return_enum_trans) : Discretizes the vColumn using auto, same_width (regular width bins), same_freq (bins with the same number of elements, nbins must be greater or equal to 2 and an exception is raised when there are not enough values for the equal frequency discretization), topk (keeps the topk most frequent categories and merges the others into new_category, 'Others' by default), or smart, which uses the best splits of a Random Forest trained on the response column. RFmodel_params parametrizes that model, for example {"n_estimators": 20, "max_depth": 10} trains a Random Forest with 20 trees and a maximum depth of 10; a Regressor is used when the response is numerical (except ints and bools), a Classifier otherwise. return_enum_trans returns the transformation instead of applying it to the parent vDataFrame.

decode(*argv) : Encodes the vColumn with a user-defined encoding, i.e. CASE WHEN vColumn = argv[2 * i] THEN argv[2 * i + 1] ... ELSE argv[n] END when the number of arguments is odd.

label_encode() : Encodes the vColumn using a bijection from the different categories to [0, n - 1]; it is only available for categorical variables (a warning is raised for date or float vColumns).

get_dummies(prefix, prefix_sep='_', drop_first=True, use_numbers_as_suffix=False) : Encodes the vColumn with the One-Hot Encoding algorithm. drop_first avoids the creation of correlated features and use_numbers_as_suffix uses numbers as suffix instead of the vColumns categories; a NameError is raised when a dummy would collide with an existing vColumn alias (changing 'prefix' or 'prefix_sep' solves the ambiguity).

mean_encode(response) : Encodes the vColumn using the average of the response partitioned by the different vColumn categories; the response column must be numerical.

iv_woe(y, nbins) : Returns the Weight Of Evidence (WOE) table. It tells the predictive power of an independent variable in relation to the response y, and the last row sums the Information Value (IV).
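An encoding sketch; the response column "survived" and the other names are illustrative assumptions:

    # vdf is the vDataFrame from the earlier sketch (load_titanic()).
    vdf["pclass"].get_dummies(prefix_sep="_", drop_first=True)   # one-hot encode
    vdf["age"].discretize(method="same_freq", nbins=4)           # equal-frequency bins
    vdf["sex"].label_encode()                                    # categories -> 0 .. n - 1
    print(vdf["embarked"].iv_woe(y="survived"))                  # WOE / IV table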
Type conversion, arithmetic and date transformations.

astype(dtype) : Converts the vColumn to the input type. The cast is tested first and a failure is reported as "{error} ... {vColumn} can not be converted to {dtype}"; a successful conversion is saved in the history ([AsType]).

apply(func, copy_name) : Applies a function to the vColumn. The function variable must be an SQL expression in which the vColumn is written as {} (the placeholder is substituted when the expression is generated). copy_name creates a copy with the given name instead of transforming the vColumn in place; this matters because a vColumn becomes "heavier" when it is used to compute other vColumns, and the vDataFrame will be slower if it has been transformed multiple times, so it is better practice to use this method only when needed.

apply_fun(func, x) : Applies a default function to the vColumn, among abs, acos, asin, atan (trigonometric inverse tangent), cbrt (cube root), ceil (value up to the next whole number), cos, cosh (hyperbolic cosine), cot (trigonometric cotangent), exp (exponential), floor, ln, log, log10 (base 10 logarithm), mod (remainder), pow (number to the power of another number), round, sin (trigonometric sine), sinh (hyperbolic sine), sqrt (arithmetic square root) and tan (trigonometric tangent); x is the extra argument needed by log, mod, pow and round.

add(x), sub(x), mul(x), div(x), round(n) : Add, subtract, multiply or divide the vColumn by the input element, or round it by keeping only the input number of digits after the comma. If the vColumn type is date-like (date, datetime, ...), the x of add represents a number of seconds, otherwise it represents a number.

slice(length, unit, start) : Slices and transforms the vColumn using a time series rule; unit is the interval unit ('minute', 'hour', ...) and start uses the floor of the slicing instead of the ceiling.

date_part(field) : Extracts a specific TS field from the vColumn (only if the vColumn is date-like); field can be one of CENTURY / DAY / DECADE / DOQ / DOW / DOY / EPOCH / HOUR / ISODOW / ISOWEEK / ISOYEAR / ... / SECOND / TIME ZONE / TIMEZONE_HOUR / TIMEZONE_MINUTE / ... / WEEK / YEAR.
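A transformation sketch; the {} placeholder behaviour follows the apply() description above, and the column names remain illustrative:

    # vdf is the vDataFrame from the earlier sketch (load_titanic()).
    vdf["fare"].apply(func="CASE WHEN {} < 0 THEN NULL ELSE {} END")  # arbitrary SQL
    vdf["fare"].apply_fun(func="sqrt")      # one of the built-in function names
    vdf["age"].round(1)                     # keep one digit after the comma
    vdf["age"].add(1)                       # numeric shift (seconds for date-like columns)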
String and regular-expression methods. Each of these rewrites the vColumn through apply() and returns the parent vDataFrame:

str_contains(pat) : Verifies if the regular expression is in each of the vColumn records.
str_count(pat) : Computes the number of matches for the regular expression in each record of the vColumn (REGEXP_COUNT).
str_extract(pat) : Extracts the regular expression in each record of the vColumn (REGEXP_SUBSTR).
str_replace(to_replace, value) : Replaces the regular expression matches in each of the vColumn records by an input value (REGEXP_REPLACE).
str_slice(start, step) : Slices the vColumn (SUBSTR).
Plotting. Every chart accepts an optional Matplotlib axes object (ax) and **style_kwds, any optional parameter to pass to the Matplotlib functions, and returns a Matplotlib axes object.

bar(method='density', of, max_cardinality, nbins, h) and hist(...) : Draw the bar chart / histogram of the vColumn based on an aggregation; h is the interval width of the bar and an optimized h is computed when the parameter is empty or invalid, while max_cardinality is the maximum number of distinct elements to be used as categorical.
pie(method='density', of, max_cardinality, h, pie_type) : Draws the pie chart of the vColumn based on an aggregation; pie_type is auto (regular pie chart), donut or rose.
boxplot(by, h, max_cardinality, cat_priority) : Draws the box plot of the vColumn, optionally partitioned by another vColumn.
density(by, bandwidth, kernel, nbins, xlim) : Draws the density plot of the vColumn using a KernelDensity model with a gaussian, logistic, sigmoid or silverman kernel; nbins is the maximum number of points used to evaluate the approximate density function, and increasing it increases the precision but slows down the learning and scoring phases; xlim sets the x limits of the current axes.
plot(ts, by, start_date, end_date, area, step) : Draws the time series of the vColumn, where ts is the time series vColumn used to order the data; start_date and end_date filter it (for example, end_date = '03-11-1993' filters the data when 'ts' is lesser than November 1993 the 3rd).
A range plot variant draws the area between two input quantiles of the vColumn over the TS (q=(0.25, 0.75) by default) with an optional plot_median overlay.
spider(by, method, of, max_cardinality, h) : Draws the spider chart of the vColumn based on an aggregation.
geo_plot(*args, **kwargs) : Draws the geospatial plot of the vColumn.
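A plotting sketch (requires Matplotlib; column names are illustrative):

    # vdf is the vDataFrame from the earlier sketch (load_titanic()).
    # Each call returns a Matplotlib axes object; in a notebook it renders inline.
    vdf["age"].hist(method="density", nbins=30)
    vdf["age"].density(kernel="gaussian")
    vdf["pclass"].pie(pie_type="donut")
    vdf["fare"].boxplot(by="pclass")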
zscore : Normalization using the Z-Score", "encoder was applied to the vColumn {}\\n{} feature{}created: {}\".format( self.alias,", "Exception( \"Not enough values to compute the Equal Frequency discretization\"", "Draws the Bar Chart of vColumn based on an aggregation.", "executeSQL(query, title=\"Testing the Type casting.\") self.transformations += [ ( \"{}::{}\".format(\"{}\",", "order_by = [order_by] check_types( [ ( \"method\", method, [ \"auto\",", "range(parameters[\"n_estimators\"]) ] query = \"SELECT split_value FROM (SELECT split_value, MAX(weighted_information_gain)", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "inverse sine atan : trigonometric inverse tangent cbrt : cube", "else \"AVG\" if by == []: if fun == \"AVG\":", "self.category())] self.transformations += [(func, ctype, category)] self.parent.__update_catalog__(erase=True, columns=[self.alias]) self.parent.__add_to_history__( \"[Apply]:", "float]), (\"unit\", unit, [str]), (\"start\", start, [bool]), ] ) start_or_end", "hyperbolic sine sqrt : arithmetic square root tan : trigonometric", "instead of the parent vDataFrame. This parameter is used for", "True if the vColumn is numerical. \"\"\" return self.category() ==", "vDataFrame.outliers : Adds a new vColumn labeled with 0 and", "using the method 'smart'.\" ) assert response, ParameterError( \"Parameter 'response'", "right: bool = True, ): \"\"\" --------------------------------------------------------------------------- Discretizes the vColumn", "of bins used for the discretization (must be > 1)", "the response is numerical (except ints and bools), a RF", "right: op1, op2, close_l, close_r = \"<\", \"<=\", \"]\", \"]\"", "this parameter to False, the function's performance can drastically decrease.", "{}.\".format( numcol, self.alias ) values = to_tablesample(query, title=title).values elif (", "float sum See Also -------- vDataFrame.aggregate : Computes the vDataFrame", "Z-Score - The MAD is null !\".format( self.alias ) warnings.warn(warning_message,", "tmp_query = \"\"\"SELECT '{0}' AS 'index', COUNT({1}) AS count, 100", "int): if index_stop < 0: index_stop += self.parent.shape()[0] limit =", "Number of elements. density : Percentage of the distribution. mean", ": Returns the a part of the vColumn. \"\"\" return", "!= \"cat_stats\": values = { \"index\": [\"name\", \"dtype\"] + index,", "write the following list: [\"Fouad\", \"Badr\"] Returns ------- vDataFrame The", "elem[2] != None ] ), ) executeSQL( \"SELECT {}, {}", "trans, self.alias, self.alias, y, y, self.parent.__genSQL__(), ) query = \"SELECT", "Diaconis [2 * IQR / n ** (1 / 3)]", "EPOCH / HOUR / ISODOW / ISOWEEK / ISOYEAR /", "\"{}\" ) ) elif method == \"mean\": query = \"WITH", "def _repr_html_(self): return self.head(limit=verticapy.options[\"max_rows\"])._repr_html_() # ---# def __setattr__(self, attr, val):", "columns = [self.alias] if of: self.parent.are_namecols_in(of) of = self.parent.format_colnames(of) from", "k: int = -1, dropna: bool = True): \"\"\" ---------------------------------------------------------------------------", "get_dummies( self, prefix: str = \"\", prefix_sep: str = \"_\",", "aggregations. parent, vDataFrame : Parent of the vColumn. transformations, str", "max_cardinality, nbins, h, ax=ax, **style_kwds) # ---# def boxplot( self,", "* FROM {}) {}\".format( self.parent.__genSQL__(), \" UNION ALL \".join(query) )", "fillna( self, val=None, method: str = \"auto\", expr: str =", "be computed if the parameter is empty or invalid. 
Aggregations and descriptive statistics. The generic entry point is aggregate(func), where func is a list of aggregation names: 'aad' (average absolute deviation), 'approx_unique' (approximative cardinality), 'count' (number of non-missing elements), 'cvar' (conditional value at risk), 'dtype' (vColumn type), 'kurtosis', 'mad' (median absolute deviation), 'max', 'mean', 'median', 'min', 'mode', 'prod', 'q%' (the q quantile, for example '50%' for the median), 'sem' (standard error of the mean), 'skewness', 'std', 'sum', 'topk' (kth most occurrent element, for example 'top1' for the mode), 'topk_percent' (density of the kth most occurrent element), 'unique' (cardinality, count distinct) and 'var' (variance). Each aggregation is also exposed as a dedicated method (aad, avg, count, kurtosis, mad, max, median, min, mode, nunique, prod, quantile, sem, skewness, std, sum, var, ...), and results are cached in the vColumn catalog so that repeated calls do not recompute them.

Several of these methods take an approx parameter (median, quantile, nunique): when it is True, the approximate aggregation is returned, which is much faster; setting it to False computes the exact value at the cost of performance. mode(dropna, n) returns the nth most occurrent element, topk(k, dropna) returns the k most occurrent categories with their counts and percentages as a tablesample, and value_counts(k) returns the distinct categories together with other statistical information. describe(method, max_cardinality, numcol) summarizes the column; method can be 'auto' (numerical summary if the vColumn is numerical, categorical otherwise), 'numerical', 'categorical', or 'cat_stats' (statistics of a numerical column numcol partitioned by the vColumn's categories). numh(method) estimates an optimal histogram interval width from the Freedman-Diaconis rule [2 * IQR / n ** (1 / 3)] and Sturges' rule [CEIL(log2(n)) + 1], while memory_usage() and store_usage() report, respectively, the in-memory footprint of the object and the storage used by the column in the database.
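A short usage sketch of these aggregations on the same hypothetical column (the exact figures naturally depend on the data):

>>> vdf["age"].avg()                                     # average of the column
>>> vdf["age"].quantile(0.9, approx=True)                # approximate 90% quantile
>>> vdf["age"].aggregate(["min", "50%", "max", "std"])   # several aggregations at once
>>> vdf["age"].topk(k=3)                                 # 3 most occurrent values, counts and percents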
Missing values and outliers. dropna() filters the parent vDataFrame where the vColumn is missing, and fillna(val, method, expr, by, order_by) imputes it. The filling method can be 'auto' (mean for a numerical vColumn with more than six distinct values, mode otherwise), 'mean', 'median', 'mode', '0ifnull' (0 when the vColumn is NULL, 1 otherwise), or the propagation methods 'ffill'/'pad' and 'bfill'/'backfill', which require order_by to order the records; alternatively a constant val or a SQL expression expr can be used, and by fills within partitions. The number of filled elements is printed when the print_info option is enabled.

For outliers, drop_outliers(threshold, ...) and fill_outliers(method, threshold, alpha, ...) rely either on a Z-Score threshold (after normalization, a record whose absolute Z-Score exceeds the threshold, 4.0 by default, is treated as a global outlier) or on the bounds quantile(alpha) and quantile(1 - alpha). fill_outliers replaces the upper and lower outliers by their respective averages ('mean'), by NULLs ('null'), or by clipping them to the bounds, and clip(lower, upper) truncates the column to explicit bounds (at least one of 'lower' or 'upper' must be provided). The related vDataFrame.outliers method adds a new vColumn labeled with 0 and 1, where 1 means global outlier.
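A hedged sketch of imputation and outlier handling, using only the parameters documented above (the "date" column used for ordering is hypothetical):

>>> vdf["age"].fillna(method="mean")                         # replace missing values by the average
>>> vdf["age"].fillna(method="ffill", order_by=["date"])     # forward fill along a time column
>>> vdf["age"].fill_outliers(method="null", threshold=4.0)   # NULL-out records with |Z-Score| > 4
>>> vdf["age"].clip(lower=0)                                 # truncate negative values to 0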
Normalization and discretization. normalize(method, by, return_trans) rescales a numerical vColumn using 'zscore' ((x - avg) / std), 'robust_zscore' (based on the median and the median absolute deviation), or 'minmax' ((x - min) / (max - min)). The by parameter normalizes within partitions when the chosen method supports it, and return_trans=True returns the transformation used instead of the parent vDataFrame without applying it, which is useful for testing. When the standard deviation or the MAD of a group is zero, a warning is raised ("Can not normalize ...") and the column is left unchanged.

discretize(method, nbins, k, new_category, RFmodel_params, response, ...) turns the vColumn into categories. The method can be 'auto', 'same_width' (bins of equal width; an optimized width from numh() is used when h is left empty), 'same_freq' (bins containing the same number of elements, which requires enough distinct values), 'topk' (keeps the k most occurrent categories, with k >= 2, and merges the less frequent ones into new_category, 'Others' by default), or 'smart', which trains a Random Forest on a response column (a Regressor when the response is numerical, except ints and bools) and uses the split values of its trees to find the most relevant intervals; RFmodel_params tunes the forest (for example the number of estimators and the maximum depth), and return_enum_trans returns the enumeration transformation instead of applying it. A numerical or date-like vColumn can also be bucketed from an explicit list of breaks with optional labels, where include_lowest and right control whether the lowest element is included and on which side the intervals are closed; at least two breaks are required.
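A sketch of both operations, still on the hypothetical numerical column; add_copy is used so that the raw values survive the bucketing:

>>> vdf["age"].add_copy("age_bin")                        # keep the raw column, bucket the copy
>>> vdf["age_bin"].discretize(method="same_width", h=10)  # fixed-width buckets of size 10
>>> vdf["age"].normalize(method="zscore")                 # (x - avg) / std, computed in-database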
Encoding. Several methods prepare a vColumn for machine learning:
- label_encode() encodes the vColumn using a bijection from the distinct categories to [0, n - 1]; it is meant for categorical vColumns and only warns (without transforming) on date or float columns.
- get_dummies(prefix, prefix_sep, drop_first, use_numbers_as_suffix), also available as one_hot_encode, applies One-Hot Encoding. drop_first drops the first dummy to avoid creating correlated features, use_numbers_as_suffix uses numbers as suffixes instead of the category names, and the method refuses to create a dummy whose name collides with an existing column (changing 'prefix' or 'prefix_sep' solves the issue).
- mean_encode(response) replaces each category by the average of the response column over that category (target encoding).
- decode(*argv) applies a user-defined encoding, reading its arguments in pairs like a SQL DECODE: CASE WHEN vColumn = argv[2 * i] THEN argv[2 * i + 1] ... ELSE argv[n] END.
The related vDataFrame.case_when creates a new feature by evaluating a variable amount of conditions, and the discretization methods above can be used as a preliminary step.
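A sketch of the encoders, assuming a hypothetical categorical column "embarked" and a hypothetical numerical response "survived"; each line is an independent illustration rather than a pipeline:

>>> vdf["embarked"].label_encode()                 # categories -> 0 .. n-1
>>> vdf["embarked"].get_dummies(drop_first=True)   # one dummy per remaining category
>>> vdf["embarked"].mean_encode("survived")        # average response per category
>>> vdf["embarked"].decode("S", 0, "C", 1, 2)      # user-defined mapping with a default value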
Transformations and string functions. apply(func, copy_name) applies an arbitrary SQL expression to the vColumn, with the '{}' placeholder standing for the column itself; when copy_name is given, the transformation is applied to a copy instead of the original column. apply_fun(func, x) applies a predefined function among abs, acos, asin, atan, cbrt, ceil, cos, cosh, cot, exp, floor, log, log10, mod, pow, round, sign, sin, sinh, sqrt, tan and tanh, with x as the secondary argument where one is needed (a modulus, an exponent, a number of decimal places). astype(dtype) converts the vColumn to another database type, and arithmetic helpers (abs, add, sub, mul, div, round) operate element-wise; on a date-like vColumn, add(x) and sub(x) interpret x as a number of seconds, otherwise x is a plain number. date_part(field) extracts a TS field from a date-like vColumn (CENTURY / DAY / DECADE / DOQ / DOW / DOY / EPOCH / HOUR / ISODOW / ISOWEEK / ISOYEAR / ... / MILLENNIUM / MILLISECONDS / MINUTE / MONTH / QUARTER / ...), and slice(length, unit, start) slices it using a time-series rule, where length is the slice size, unit its unit ('second' by default, but it can be 'minute', 'hour', ...) and start selects whether each record is mapped to the floor of its slicing interval.

Regular-expression helpers transform text vColumns in place: str_contains(pat) verifies whether pat matches each record, str_count(pat) counts the matches, str_extract(pat) extracts the matching substring (REGEXP_SUBSTR), str_replace(to_replace, value) rewrites the matched substring (REGEXP_REPLACE), and str_slice(start, step) slices the string (SUBSTR). isin(val, ...) looks whether specific records are in the vColumn, for example isin(["Fouad", "Badr"]), and returns the vDataFrame of the search. Utility methods round out the class: add_copy(name) adds a copy of the vColumn to the parent vDataFrame, rename(new_name) renames it by dropping the current vColumn and creating a copy with the new name (with a warning that subsequent SQL code generation will be slower), drop(add_history) removes the vColumn from the vDataFrame, head / tail / iloc(limit, offset) return parts of the column, and nlargest(n) / nsmallest(n) return its n largest or smallest elements.
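A sketch combining a custom SQL transformation, a predefined function and a regular-expression helper (the "name" column is hypothetical; '{}' is the placeholder documented above):

>>> vdf["age"].apply(func="CASE WHEN {} < 0 THEN 0 ELSE {} END")  # clamp negative ages to 0
>>> vdf["age"].apply_fun(func="round", x=2)                       # round to 2 decimal places
>>> vdf["name"].str_contains(pat="Mr\\.")                         # does the regexp match each record?
>>> vdf["name"].str_slice(start=1, step=20)                       # SUBSTR(name, 1, 20)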
Plotting. The drawing methods return a Matplotlib axes object; an existing ax can be passed in and **style_kwds forwards styling options. For the aggregated charts (bar, hist, pie, spider), the method parameter selects the aggregation ('density', 'count', 'avg', 'min', 'max', 'sum', or a quantile such as '50%') computed on the optional of column, max_cardinality bounds the number of distinct categories shown (the less frequent elements are gathered together), and h sets the interval width for numerical or date-like vColumns (an optimized width is picked when it is left empty). pie additionally accepts pie_type 'auto', 'donut' or 'rose'. boxplot(by, h, max_cardinality, cat_priority) draws the box plot, optionally partitioned by another vColumn. density(by, bandwidth, kernel, nbins, xlim) draws an approximate density plot: kernels include 'gaussian', 'logistic' and 'silverman', bandwidth is the kernel bandwidth, nbins is the maximum number of points used to evaluate the approximate density function (increasing it improves precision but also increases the time of the learning and scoring phases), and xlim sets the x limits of the axes. For time series, plot(ts, by, start_date, end_date, area, step) draws the column against a date-like ts column, optionally restricted to a date window (for example, start_date='03-11-1993' keeps the data where ts is greater than November 3rd, 1993) and rendered as an area or step plot, while range_plot(ts, q, start_date, end_date, plot_median) draws a range plot based on the median and the two input quantiles q, (0.25, 0.75) by default. Geospatial vColumns are drawn with geo_plot(*args, **kwargs), whose arguments are forwarded to geopandas.GeoDataFrame.plot.
For example, it", "self.alias), self.alias, self.parent.__genSQL__(), self.parent.__get_last_order_by__(), limit, offset, ), title=title, ) tail.count", "vDataFrame[].get_dummies : Encodes the vColumn with One-Hot Encoding. \"\"\" check_types([(\"response\",", "of the input vColumns. \"\"\" if isinstance(cat_priority, str) or not", "/ _ __|_. _ _ |_) # \\/ (/_| |", "n: int): \"\"\" --------------------------------------------------------------------------- Rounds the vColumn by keeping only", "self.parent # ---# def fill_outliers( self, method: str = \"winsorize\",", "width. \"\"\" check_types( [(\"method\", method, [\"sturges\", \"freedman_diaconis\", \"fd\", \"auto\"])] )", "MILLISECONDS / MINUTE / MONTH / QUARTER / SECOND /", "True if the vColumn category is date. See Also --------", "method 'topk'\" ) distinct = self.topk(k).values[\"index\"] trans = ( \"(CASE", "and the min sem : standard error of the mean", ") query_result = executeSQL( query=query, title=\"Computing the distinct categories of", "op2, close_l, close_r = \"<=\", \"<\", \"[\", \"[\" if idx", "* i + 1] ... ELSE argv[n] END Returns -------", "lesser than quantile(alpha) or greater than quantile(1-alpha) will be filled.", "check_types( [ ( \"method\", method, [ \"auto\", \"mode\", \"0ifnull\", \"mean\",", "# ---# def pie( self, method: str = \"density\", of:", "return_enum_trans: bool, optional Returns the transformation instead of the vDataFrame", "self.alias, n ) title = \"Reads {} {} smallest elements.\".format(n,", "bool = False ): \"\"\" --------------------------------------------------------------------------- Normalizes the input vColumns", "FROM {}{} LIMIT {} OFFSET {}\".format( bin_spatial_to_str(self.category(), self.alias), self.alias, self.parent.__genSQL__(),", "named {} was added to the vDataFrame.\".format( self.alias, name )", "{}, '{}', '{}')\".format( \"{}\", length, unit.upper(), start_or_end ) ) #", "\"\"\" check_types([(\"x\", x, [int, float])]) if self.isdate(): return self.apply(func=\"TIMESTAMPADD(SECOND, -({}),", "self.alias, table ) result = executeSQL( query, title=\"Different aggregations to", "of the different categories to consider when drawing the box", "avg, \"NULLIFZERO\" if (nullifzero) else \"\", stddev ) else: final_transformation", "non_events, SUM({}) AS events FROM ({}) x GROUP BY 1\".format(", "== \"mode\") and (val == None): val = self.mode(dropna=True) if", "non_events, events, non_events / NULLIFZERO(SUM(non_events) OVER ()) AS pt_non_events, events", "= [] model = KernelDensity( name, bandwidth=bandwidth, kernel=kernel, nbins=nbins, xlim=xlim_tmp,", "= True): \"\"\" --------------------------------------------------------------------------- Returns the k most occurent elements", "part of the vColumn (delimited by an offset and a", "dropna: bool, optional If set to True, NULL values will", "head(self, limit: int = 5): \"\"\" --------------------------------------------------------------------------- Returns the head", "# ---# def apply(self, func: str, copy_name: str = \"\"):", "optional Upper bound. Returns ------- vDataFrame self.parent See Also --------", "columns += [column] if not (\"cmap\" in kwargs): from verticapy.plot", "= \"Can not normalize {} using the MIN and the", "trans else: self.transformations += [trans] sauv = {} for elem", "aggregations. \"\"\" check_types([(\"x\", x, [int, float], (\"approx\", approx, [bool]))]) prefix", "Parameters ---------- start: int Start of the slicing. 
step: int", "self.aggregate([\"max\"]).values[self.alias][0] # ---# def mean_encode(self, response: str): \"\"\" --------------------------------------------------------------------------- Encodes", "self.alias, n ), title=\"Computing the mode.\", method=\"fetchall\", ) top =", "be 'minute' 'hour'... start: bool, optional If set to True,", "100)]).values[self.alias][ 0 ] # ---# def range_plot( self, ts: str,", "to check if Badr and Fouad are in the vColumn.", "ln : natural logarithm log : logarithm log10 : base", "!= 0, ValueError(\"Division by 0 is forbidden !\") return self.apply(func=\"{}", "Applies a function to the input vColumn. \"\"\" check_types( [", "# ______ / / # \\ / / / #", "to the input vColumn. Returns ------- vDataFrame self.parent See Also", "the new vDataFrame of the search. Parameters ---------- val: list", "{} was deleted from the vDataFrame.\".format(self.alias) ) return parent #", "OVER ()) AS pt_events FROM ({}) x\".format( self.alias, query, )", "the input method. \"\"\" check_types([(\"lower\", lower, [float, int]), (\"upper\", upper,", "str TS (Time Series) vColumn to use to order the", "element{}filled.\".format(total, conj)) self.parent.__add_to_history__( \"[Fillna]: {} {} missing value{} filled.\".format( total,", "[self.max()] elif method == \"topk\": assert k >= 2, ParameterError(", "with Label Encoding. vDataFrame[].get_dummies : Encodes the vColumn with One-Hot", "\"\"\" return self.apply(func=\"ABS({})\") # ---# def add(self, x: float): \"\"\"", "0.5) AS 'approx_50%', APPROXIMATE_PERCENTILE ({3}{4} USING PARAMETERS percentile = 0.75)", "ax=ax, **style_kwds) # ---# def iloc(self, limit: int = 5,", "min(self): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'min' (Minimum). Returns", "self.parent.shape()[0] limit = index_stop - index_start if limit <= 0:", "RandomForestRegressor(tmp_model_name) else: model = RandomForestClassifier(tmp_model_name) model.set_params({\"n_estimators\": 20, \"max_depth\": 8, \"nbins\":", "vDataFrame self.parent See Also -------- vDataFrame[].date_part : Extracts a specific", "parent: vDataFrame, optional Parent of the vColumn. One vDataFrame can", "hist( self, method: str = \"density\", of: str = \"\",", "the Box Plot of the input vColumns. \"\"\" if isinstance(cat_priority,", "vDataFrame.aggregate : Computes the vDataFrame input aggregations. \"\"\" return self.aggregate([\"min\"]).values[self.alias][0]", "1993 the 3rd. plot_median: bool, optional If set to True,", "= \"<=\", \"[\" elif idx == 0: op1, close_l =", "+ 1] ... ELSE argv[n] END Returns ------- vDataFrame self.parent", "vColumn based on an aggregation. \"\"\" check_types( [ (\"method\", method,", "as categorical (No h will be picked or computed) ax:", "used for the discretization (must be > 1) k: int,", "to convert to use to convert the vColumn. If this", "using previously the method on the vColumn \" \"or simply", "Encodes the vColumn with Label Encoding. 
vDataFrame[].mean_encode : Encodes the", "= breaks[idx], breaks[idx + 1] if right: op1, op2, close_l,", "10 logarithm mod : remainder of a division operation pow", "MICROSECONDS / MILLENNIUM / MILLISECONDS / MINUTE / MONTH /", "] ), bin_spatial_to_str(self.category()), new_category.replace(\"'\", \"''\"), ), \"varchar\", \"text\", ) elif", "== \"MEDIAN\": fun = \"APPROXIMATE_MEDIAN\" query = \"SELECT {}, {}({})", "result sturges = max( float(vColumn_max - vColumn_min) / int(math.floor(math.log(count, 2)", "/ NULLIFZERO(SUM(non_events) OVER ()) AS pt_non_events, events / NULLIFZERO(SUM(events) OVER", "[dict]), (\"return_enum_trans\", return_enum_trans, [bool]), (\"h\", h, [int, float]), (\"response\", response,", "== \"date\" # ---# def isin(self, val: list, *args): \"\"\"", "or 'upper' must have a numerical value\" ) lower_when =", "True, the record will be sliced using the floor of", "if n == 0: nullifzero = 0 avg, stddev =", "Returns the vDataFrame memory usage. \"\"\" import sys total =", "For more information, see utilities.tablesample. See Also -------- vDataFrame.aggregate :", "title=\"Computing the different categories to normalize.\", method=\"fetchall\", ) for i", "( \"MAX({}) OVER (PARTITION BY {})\".format( self.alias, \", \".join(by) ),", "to use to order the data. The vColumn type must", "only if the parameter 'by' is empty\\nIf you want to", "values).\\nNothing was filled.\".format( self.alias ) warnings.warn(warning_message, Warning) return self.parent if", "self.parent.isin(val) # ---# def isnum(self): \"\"\" --------------------------------------------------------------------------- Returns True if", "query = \"SELECT {} FROM (SELECT {} AS {}, {}", "Returns ------- ax Matplotlib axes object See Also -------- vDataFrame.plot", "the box plot. The other categories will be filtered. ax:", "float quantile (or approximate quantile). See Also -------- vDataFrame.aggregate :", "self.parent See Also -------- vDataFrame[].slice : Slices the vColumn using", "else \"mode\" total = self.count() if (method == \"mode\") and", "\"s were \" if len(all_new_features) > 1 else \" was", "not (schema): schema = \"public\" tmp_view_name = gen_tmp_name(schema=schema, name=\"view\") tmp_model_name", ") else: return getattr(self, index) # ---# def __len__(self): return", "method in (\"ffill\", \"pad\", \"bfill\", \"backfill\"): assert order_by, ParameterError( \"If", "Also -------- vDataFrame.add_copy : Creates a copy of the vColumn.", "title=\"Computing the distinct categories of {}.\".format(self.alias), method=\"fetchall\", ) return [item", "next element (Constant Interpolation). ffill : Propagation of the first", "[\"numh\"], self.alias: [best_h]}) if self.category() == \"int\": best_h = max(math.floor(best_h),", "5): \"\"\" --------------------------------------------------------------------------- Returns the head of the vColumn. 
Parameters", "if (nullifzero) else \"\", cmax, cmin, ) else: final_transformation =", "self.alias, dtype ) ) # ---# def avg(self): \"\"\" ---------------------------------------------------------------------------", "Cardinality threshold to use to determine if the vColumn will", ") else: avg, stddev = ( \"AVG({}) OVER (PARTITION BY", "-------- vDataFrame[].isbool : Returns True if the vColumn is boolean.", "moving # data around for processing, VerticaPy brings the logic", "# # ---# def __init__( self, alias: str, transformations: list", "= labels[idx] else: label = f\"{close_l}{first_elem};{second_elem}{close_r}\" conditions += [ f\"'{first_elem}'", "\"robust_zscore\": max_floor = 0 for elem in by: if len(self.parent[elem].transformations)", "(except ints and bools), a RF Classifier otherwise. Example: Write", "self.alias, self.parent.__genSQL__(), where, self.alias, n ), title=\"Computing the mode.\", method=\"fetchall\",", "Returns the n largest vColumn elements. Parameters ---------- n: int,", "vColumns will memorize the already computed aggregations to gain in", "n): name = ( '\"{}{}\"'.format(prefix, k) if (use_numbers_as_suffix) else '\"{}{}\"'.format(", "AS {} FROM {} WHERE {} IS NOT NULL GROUP", "FROM {}{} OFFSET {} LIMIT 1\".format( self.alias, cast, self.parent.__genSQL__(), self.parent.__get_last_order_by__(),", ".transpose() .values[self.alias] ) result = [distinct_count] + result index =", "\".join(conditions) + \" END\" self.apply(func=expr) # ---# def ctype(self): \"\"\"", "h = int(max(math.floor(h), 1)) floor_end = -1 if (self.category() ==", "\"Not enough values to compute the Equal Frequency discretization\" )", "new feature by evaluating some conditions. vDataFrame[].discretize : Discretizes the", "AND {})\".format(self.alias, p_alpha, p_1_alpha) ) return self.parent # ---# def", "vColumns whereas one vColumn can only have one parent. catalog:", "partition. return_trans: bool, optimal If set to True, the method", "expression generated will look like: even: CASE ... WHEN vColumn", "occurent elements. \"\"\" if \"agg\" not in kwargs: query =", "for elem in result] elif self.isnum() and method in (\"same_width\",", "_ |_) # \\/ (/_| | |(_(_|| \\/ # /", "/ ({})\".format(\"{}\", x)) # ---# def drop(self, add_history: bool =", "ELSE (pt_non_events - pt_events) * ZEROIFNULL(LN(pt_non_events / NULLIFZERO(pt_events))) END AS", "a cutomized aggregation (ex: AVG(column1) + 5). ax: Matplotlib axes", "unique category. h: float, optional The interval size to convert", "GROUP BY 1\".format( self.alias, y, y, query, ) query =", "self.aggregate([\"count\"]).values[self.alias][0] # ---# def cut( self, breaks: list, labels: list", "tuple] = (None, None), ax=None, **style_kwds, ): \"\"\" --------------------------------------------------------------------------- Draws", "and upper ZScores. threshold: float, optional Uses the Gaussian distribution", "datetime.datetime, datetime.date, int, float], ), (\"plot_median\", plot_median, [bool]), ] )", "tail.name = self.alias return tail # ---# def isbool(self): \"\"\"", "Any optional parameter to pass to the Matplotlib functions. Returns", "Replaces the upper and lower outliers by their respective average.", "rule. 
Parameters ---------- val: int/float/str, optional Value to use to", "\"]\" if labels: label = labels[idx] else: label = f\"{close_l}{first_elem};{second_elem}{close_r}\"", "+= [column] if not (\"cmap\" in kwargs): from verticapy.plot import", "sublist in query_result for item in sublist] # ---# def", "warnings.warn(warning_message, Warning) return self mad, med = self.aggregate([\"mad\", \"approx_median\"]).values[self.alias] mad", "else '\"{}{}\"'.format( prefix, str(distinct_elements[k]).replace('\"', \"_\") ) ) assert not (self.parent.is_colname_in(name)),", "'new_name', you'll be able to solve this issue.\" ) self.add_copy(new_name)", "two flower brackets {}. For example to apply the function:", "--------------------------------------------------------------------------- Returns the n smallest elements in the vColumn. Parameters", "self.alias, conj, ) ) else: if verticapy.options[\"print_info\"]: print(\"Nothing was filled.\")", "return self.parent # ---# def count(self): \"\"\" --------------------------------------------------------------------------- Aggregates the", "\"sturges\": best_h = sturges elif method.lower() in (\"freedman_diaconis\", \"fd\"): best_h", "print_time_sql=False, ) except: avg, stddev = ( \"AVG({}) OVER (PARTITION", "\"\"\" --------------------------------------------------------------------------- Applies a default function to the vColumn. Parameters", "except: drop(tmp_view_name, method=\"view\") drop(tmp_model_name, method=\"model\") raise drop(tmp_view_name, method=\"view\") drop(tmp_model_name, method=\"model\")", "str, optional vColumn to use to partition the data. method:", "of the vColumn. See Also -------- vDataFrame.topk : Returns the", "LIMIT {}) VERTICAPY_SUBTABLE ORDER BY _verticapy_cnt_ ASC LIMIT 1\".format( self.alias,", "Returns ------- vDataFrame self.parent See Also -------- vDataFrame.add_copy : Creates", "elem in result] elif self.isnum() and method in (\"same_width\", \"auto\"):", "nbins, [float, int]), ] ) if by: self.parent.are_namecols_in(by) by =", "pre_comp != \"VERTICAPY_NOT_PRECOMPUTED\": return pre_comp store_usage = executeSQL( \"SELECT ZEROIFNULL(SUM(LENGTH({}::varchar)))", "response. \"\"\" if self.category() in [\"date\", \"float\"]: warning_message = (", ") ) elif method == \"mean\": query = \"WITH vdf_table", "return_enum_trans, [bool]), (\"h\", h, [int, float]), (\"response\", response, [str]), (\"nbins\",", "Returns ------- ax Matplotlib axes object See Also -------- vDataFrame.bar", "you are using. Returns ------- tablesample An object containing the", "= None avg = \"DECODE({}, {}, NULL)\".format( by[0], \", \".join(", "1): \"\"\" --------------------------------------------------------------------------- Returns the nth most occurent element. Parameters", "transformation must be similar to the following: (function, type, category)", "to the input type. Parameters ---------- dtype: str New type.", "The name of the merging category when using the 'topk'", ": Computes the vDataFrame input aggregations. \"\"\" return self.aggregate([\"count\"]).values[self.alias][0] #", "[\"Fouad\", \"Badr\"] Returns ------- vDataFrame The vDataFrame of the search.", "result[i] ) trans += \" ELSE NULL END)\" trans =", "more information, see utilities.tablesample. See Also -------- vDataFrame[].tail : Returns", "a value to a specified number of decimal places sign", "be gathered together to create a new category : 'Others'.", "'{}' was normalized with the method '{}'.\".format( self.alias, method )", "vColumn using 'avg' (Average). 
Returns ------- float average See Also", "ctype, category)] self.parent.__update_catalog__(erase=True, columns=[self.alias]) self.parent.__add_to_history__( \"[Apply]: The vColumn '{}' was", "0, h: float = 0, ax=None, **style_kwds, ): \"\"\" ---------------------------------------------------------------------------", "): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using multiple statistical aggregations:", "represents the second argument. Returns ------- vDataFrame self.parent See Also", "END AS iv FROM ({}) x ORDER BY ord\".format( self.alias,", "datetime, timestamp...) or numerical. q: tuple, optional Tuple including the", "be similar to the following: (function, type, category) parent: vDataFrame,", "hyperbolic cosine cot : trigonometric cotangent exp : exponential function", "BY verticapy_agg DESC\".format( self.alias, bin_spatial_to_str(self.category(), self.alias), self.alias, kwargs[\"agg\"], self.parent.__genSQL__(), self.alias,", "delimitor of the dummies. drop_first: bool, optional Drops the first", "gen_tmp_name(schema=schema, name=\"view\") tmp_model_name = gen_tmp_name(schema=schema, name=\"model\") assert nbins >= 2,", "set to True, NULL values will not be considered during", "nsmallest(self, n: int = 10): \"\"\" --------------------------------------------------------------------------- Returns the n", "verticapy_agg FROM {} WHERE {} IS NOT NULL GROUP BY", ") else: p_alpha, p_1_alpha = ( self.parent.quantile([alpha, 1 - alpha],", "[int, float]), (\"unit\", unit, [str]), (\"start\", start, [bool]), ] )", "else: return self.apply(func=\"{} - ({})\".format(\"{}\", x)) # ---# def sum(self):", "close_l = \"<\", \"]\" if labels: label = labels[idx] else:", ": Normalization using the MinMax (min and max). (x -", "than November 1993 the 3rd. area: bool, optional If set", "categories and merge the other into one unique category. h:", "( '\"{}{}\"'.format(prefix, k) if (use_numbers_as_suffix) else '\"{}{}\"'.format( prefix, str(distinct_elements[k]).replace('\"', \"_\")", "on type numeric|date\" ) if self.isnum(): result = ( self.parent.describe(", "{}, {}({}) FROM {} GROUP BY {};\".format( by[0], fun, self.alias,", "== [0, 1], TypeError( \"vColumn {} must be binary to", "**style_kwds, ) # ---# def plot( self, ts: str, by:", "= \"The vColumn {} has no mode (only missing values).\\nNothing", "Prefix of the dummies. prefix_sep: str, optional Prefix delimitor of", "(0.25, 0.75), start_date: Union[str, datetime.datetime, datetime.date] = \"\", end_date: Union[str,", "each record of the vColumn. vDataFrame[].extract : Extracts the regular", "Normalization using the Z-Score (avg and std). (x - avg)", "[str])]) return self.apply( func=\"REGEXP_SUBSTR({}, '{}')\".format(\"{}\", pat.replace(\"'\", \"''\")) ) # ---#", "def drop_outliers( self, threshold: float = 4.0, use_threshold: bool =", "return self.apply(func=\"{} - ({})\".format(\"{}\", x)) # ---# def sum(self): \"\"\"", "sys.getsizeof(self.transformations) + sys.getsizeof(self.catalog) ) for elem in self.catalog: total +=", "---# def __init__( self, alias: str, transformations: list = [],", "True if the vColumn is boolean, False otherwise. Returns -------", "float, optional The bandwidth of the kernel. kernel: str, optional", ": Discretizes the vColumn. 
vDataFrame[].label_encode : Encodes the vColumn with", "to use to sort the data when using TS methods.", "count, vColumn_min, vColumn_025, vColumn_075, vColumn_max = result sturges = max(", "\"column\" in kwargs: column = kwargs[\"column\"] else: check = False", "AS {} FROM {}{} LIMIT {} OFFSET {}\".format( bin_spatial_to_str(self.category(), self.alias),", "vColumn(str_sql): \"\"\" --------------------------------------------------------------------------- Python object which that stores all user", "invalid. max_cardinality: int, optional Maximum number of vColumn distinct elements", "= 0 limit = \" LIMIT {}\".format(limit) else: limit =", "Union[str, datetime.datetime, datetime.date] = \"\", plot_median: bool = False, ax=None,", ") index_stop = index.stop index_start = index.start if not (isinstance(index_start,", "__getitem__(self, index): if isinstance(index, slice): assert index.step in (1, None),", "same_freq : Computes bins with the same number of elements.", "max_cardinality: int = 8, cat_priority: list = [], ax=None, **style_kwds,", "not (isinstance(index_start, int)): index_start = 0 if index_start < 0:", "GROUP (ORDER BY {}) OVER (), PERCENTILE_CONT(1 - {}) WITHIN", "vDataFrame self.parent See Also -------- vDataFrame[].str_contains : Verifies if the", "ax Matplotlib axes object See Also -------- vDataFrame[].bar : Draws", "method=\"fetchfirstelem\", ) self.parent.__update_catalog__( {\"index\": [\"store_usage\"], self.alias: [store_usage]} ) return store_usage", "transformation instead of the vDataFrame parent and do not apply", "= \"Computing WOE & IV of {} (response = {}).\".format(self.alias,", "\"\") ) ) # ---# def apply_fun(self, func: str, x:", "input vColumn. \"\"\" return self.apply(func=\"ABS({})\") # ---# def add(self, x:", "6), h: Union[int, float, tuple] = (None, None), ax=None, **style_kwds,", "(\"FLOOR({}) || ''\", \"varchar\", \"text\") else: trans = (\"{} ||", "Matplotlib axes object See Also -------- vDataFrame.boxplot : Draws the", "vDataFrame.memory_usage : Returns the vDataFrame memory usage. \"\"\" import sys", "vColumn to use to partition the data. method: str, optional", "# ---# def str_slice(self, start: int, step: int): \"\"\" ---------------------------------------------------------------------------", "= ( name.replace(\" \", \"_\") .replace(\"/\", \"_\") .replace(\",\", \"_\") .replace(\"'\",", "See Also -------- vDataFrame[].isdate : Returns True if the vColumn", "(unit: b). Returns ------- int vColumn expected store usage. See", "______ __ __ __ __ __ __ __ # /\\", "will be considered as categorical. numcol: str, optional Numerical vColumn", "verticapy.plot import gen_colors from matplotlib.lines import Line2D colors = gen_colors()", "vColumn category is date, False otherwise. Returns ------- bool True", "else \"\", cmax, cmin, ) else: final_transformation = [ (", "different than 1.\" ) index_stop = index.stop index_start = index.start", "they occur, and other statistical information. Parameters ---------- k: int,", "\"\"\"SELECT '{0}' AS 'index', COUNT({1}) AS count, 100 * COUNT({1})", "(most occurent element). 
0ifnull : 0 when the vColumn is", "cmin == 0: warning_message = \"Can not normalize {} using", "str, copy_name: str = \"\"): \"\"\" --------------------------------------------------------------------------- Applies a function", "ORDER BY {} ASC LIMIT {}\".format( self.parent.__genSQL__(), self.alias, self.alias, n", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "int = 6, new_category: str = \"Others\", RFmodel_params: dict =", "mean encoding with {} as Response Column.\".format( self.alias, response )", "return range_curve_vdf( self, ts, q, start_date, end_date, plot_median, ax=ax, **style_kwds,", "= result.values[self.alias] elif (method == \"cat_stats\") and (numcol != \"\"):", "[str]), (\"nbins\", nbins, [int])]) self.parent.are_namecols_in(y) y = self.parent.format_colnames(y) assert self.parent[y].nunique()", "/ EPOCH / HOUR / ISODOW / ISOWEEK / ISOYEAR", "elements to display. offset: int, optional Number of elements to", "elements to return. dropna: bool, optional If set to True,", "the transformation used instead of the parent vDataFrame. This parameter", "doesn't work on booleans\".format(self.alias) warnings.warn(warning_message, Warning) elif self.isnum(): if method", "occurent elements and their distributions as percents. Parameters ---------- k:", "rounds a value to a specified number of decimal places", "the vColumn. The category will be one of the following:", "vColumn. \"\"\" check_types([(\"limit\", limit, [int, float]), (\"offset\", offset, [int, float])])", "set to True, the Median will be drawn. ax: Matplotlib", "NULL GROUP BY {} ORDER BY {}\".format( bin_spatial_to_str(self.category(), self.alias), self.alias,", "MinMax (min and max). (x - min) / (max -", "\"mean\": query = \"WITH vdf_table AS (SELECT * FROM {})", "tail of the vColumn. Parameters ---------- limit: int, optional Number", "func=\"(CASE WHEN {} < {} THEN {} WHEN {} >", "for elem in copy_trans] for elem in sauv: self.catalog[elem] =", "max( len(self.parent[column].transformations), max_floor ) except: pass max_floor -= len(self.transformations) if", "self.parent[y].nunique() == 2, TypeError( \"vColumn {} must be binary to", "be drawn. ax: Matplotlib axes object, optional The axes to", "if (start) else \"END\" return self.apply( func=\"TIME_SLICE({}, {}, '{}', '{}')\".format(", "convert to use to convert the vColumn. If this parameter", "standard deviation topk : kth most occurent element (ex: top1", "and not (method == \"categorical\")) or ( method == \"is_numeric\"", "* int(nbins): nth_elems += [str(total)] total += nb where =", "= \"COALESCE({}, {})\".format(\"{}\", val) elif (len(by) == 1) and (self.parent[by[0]].nunique()", "functions. Parameters ---------- func: list List of the different aggregation.", "min : Minimum of the vColumn 'of'. max : Maximum", "Matplotlib axes object \"\"\" columns = [self.alias] check = True", "Computes the vDataFrame input aggregations. \"\"\" return self.aggregate([\"max\"]).values[self.alias][0] # ---#", "== 0: nullifzero = 0 avg, stddev = self.aggregate([\"avg\", \"std\"]).values[self.alias]", "elif ( ((distinct_count < max_cardinality + 1) and (method !=", "): \"\"\" --------------------------------------------------------------------------- Draws the vColumn Density Plot. Parameters ----------", "= ( parent, alias, [elem for elem in transformations], )", "Parameters ---------- n: int, optional Offset. 
Returns ------- tablesample An", "close_l, close_r = \"<\", \"<=\", \"]\", \"]\" else: op1, op2,", "int(max(math.floor(h), 1)) floor_end = -1 if (self.category() == \"int\") else", "returned. By setting this parameter to False, the function's performance", "elements to be used as categorical. The less frequent elements", "\", \".join([quote_ident(column) for column in by]) ) if (by) else", "approx, [bool])]) if approx: return self.aggregate(func=[\"approx_unique\"]).values[self.alias][0] else: return self.aggregate(func=[\"unique\"]).values[self.alias][0] #", "/ / # \\ / / / # \\/ /", "-------- vDataFrame[].head : Returns the head of the vColumn. vDataFrame[].tail", "OR events = 0 THEN 0 ELSE ZEROIFNULL(LN(pt_non_events / NULLIFZERO(pt_events)))", "--------------------------------------------------------------------------- Returns the vColumn memory usage. Returns ------- float vColumn", "--------------------------------------------------------------------------- Aggregates the vColumn using 'sem' (standard error of mean).", "the vColumn. The function variable must be composed of two", "result. For more information, see utilities.tablesample. See Also -------- vDataFrame[].head", "[store_usage]} ) return store_usage # ---# def str_contains(self, pat: str):", "), ) executeSQL( \"SELECT {} FROM {} LIMIT 1\".format( new_column.format(self.alias),", "self.alias ) warnings.warn(warning_message, Warning) return self elif (n == 1)", "result[0], result[3], result[4], result[6], result[7], ) elif self.isdate(): min_date =", "a division operation pow : number raised to the power", "dtype, get_category_from_vertica_type(ctype=dtype), ) ] self.parent.__add_to_history__( \"[AsType]: The vColumn {} was", "k: int = 30): \"\"\" --------------------------------------------------------------------------- Returns the k most", "0 cmin, cmax = self.aggregate([\"min\", \"max\"]).values[self.alias] if cmax - cmin", "Applies a function to all the vColumns. vDataFrame.eval : Evaluates", "\"\"\" check_types([(\"limit\", limit, [int, float]), (\"offset\", offset, [int, float])]) if", "will not be considered during the computation. Returns ------- tablesample", "kwargs[\"cmap\"] = gen_cmap()[0] else: if not (\"color\" in kwargs): from", "0: index_stop += self.parent.shape()[0] limit = index_stop - index_start if", "of the different aggregation. aad : average absolute deviation approx_unique", "\"max\"]) index = result.values[\"index\"] result = result.values[self.alias] elif (method ==", "Forest on a response column to find the most relevant", "def mean_encode(self, response: str): \"\"\" --------------------------------------------------------------------------- Encodes the vColumn using", "ORDER BY {} DESC LIMIT {}\".format( self.parent.__genSQL__(), self.alias, self.alias, n", "approx: bool, optional If set to True, the approximate cardinality", "see utilities.tablesample. See Also -------- vDataFrame[].describe : Computes the vColumn", "\"WHEN {} BETWEEN {} AND {} THEN '[{};{}]' \".format( \"{}\",", "None ] ), ) executeSQL( \"SELECT {}, {} FROM {}", "float])]) if self.isdate(): return self.apply(func=\"TIMESTAMPADD(SECOND, {}, {})\".format(x, \"{}\")) else: return", ") distinct_elements = self.distinct() if distinct_elements not in ([0, 1],", "dropped. Returns ------- vDataFrame self.parent See Also -------- vDataFrame.fill_outliers :", "Aggregates the vColumn using 'sem' (standard error of mean). 
Returns", "[str]), (\"prefix_sep\", prefix_sep, [str]), (\"drop_first\", drop_first, [bool]), (\"use_numbers_as_suffix\", use_numbers_as_suffix, [bool]),", "the vColumn with a user defined Encoding. vDataFrame[].discretize : Discretizes", "return self.parent # ---# def geo_plot(self, *args, **kwargs): \"\"\" ---------------------------------------------------------------------------", "(numcol != \"\"): numcol = self.parent.format_colnames(numcol) assert self.parent[numcol].category() in (\"float\",", "if copy_name: self.add_copy(name=copy_name) for k in range(max_floor): self.parent[copy_name].transformations += [", "\"ceil\", \"cos\", \"cosh\", \"cot\", \"exp\", \"floor\", \"ln\", \"log\", \"log10\", \"mod\",", "{} ): self.parent, self.alias, self.transformations = ( parent, alias, [elem", "x: float If the vColumn type is date like (date,", "Input Start Date. For example, time = '03-11-1993' will filter", "QueryError( \"{}\\nError when applying the func 'x -> {}' to", "BETWEEN {} AND {}) THEN {} ELSE NULL END)\".format( \"{}\",", "IS NOT NULL GROUP BY {0} ORDER BY COUNT(*)\" \"", "return result # ---# def kurtosis(self): \"\"\" --------------------------------------------------------------------------- Aggregates the", "ax=ax, **style_kwds) # ---# def category(self): \"\"\" --------------------------------------------------------------------------- Returns the", "|_ |~) _ _| _ /~\\ _ |. # |_)\\/", "'n' must be greater or equal to 1\") where =", "else str(int(n)) if isinstance(top, decimal.Decimal): top = float(top) self.parent.__update_catalog__( {\"index\":", "= 0.75) AS 'approx_75%', APPROXIMATE_PERCENTILE ({3}{4} USING PARAMETERS percentile =", "else: category, ctype = self.category(), self.ctype() copy_trans = [elem for", "\"mean\" if (self.isnum() and self.nunique(True) > 6) else \"mode\" total", "\"'{}'\".format(str(elem[0]).replace(\"'\", \"''\")) if elem[0] != None else \"NULL\", elem[2] if", "\"The vColumn {} has no mode (only missing values).\\nNothing was", "st return self.apply(func=st.decode(str_sql(\"{}\"), *argv)) # ---# def density( self, by:", "50): try: result = executeSQL( \"SELECT {}, AVG({}), STDDEV({}) FROM", "... ELSE argv[n] END Returns ------- vDataFrame self.parent See Also", "not xlim: xmin = self.min() xmax = self.max() else: xmin,", "element to the vColumn. Parameters ---------- x: float If the", "+ \" WHEN \".join(conditions) + \" END\" self.apply(func=expr) # ---#", "decode(self, *argv): \"\"\" --------------------------------------------------------------------------- Encodes the vColumn using a user-defined", "method == \"zscore\": if n == 0: nullifzero = 0", "\".join( [ \"{}, {}\".format( \"'{}'\".format(str(elem[0]).replace(\"'\", \"''\")) if elem[0] != None", "name of the merging category when using the 'topk' method.", "Vertica’s # speed and built-in analytics and machine learning features.", "result.values[\"woe\"] += [\"\"] result.values[\"iv\"] += [sum(result[\"iv\"])] return result # ---#", "\"NULL\" if (elem[0] == None) else \"'{}'\".format(str(elem[0]).replace(\"'\", \"''\")) ) result[idx][1]", "WHERE {} IS NOT NULL LIMIT 0\".format( func.replace(\"{}\", self.alias), self.parent.__genSQL__(),", "Computes the vDataFrame input aggregations. 
\"\"\" return self.aggregate([\"aad\"]).values[self.alias][0] # ---#", "Other aggregations could work if it is part of the", "total = self.count() if (method == \"mode\") and (val ==", "else: distinct_elements = self.distinct() expr = [\"DECODE({}\"] text_info = \"\\n\"", ") for elem in result if elem[1] != None ]", "True if the vColumn category is date. \"\"\" return self.category()", "of Freedman Diaconis and Sturges. freedman_diaconis : Freedman Diaconis [2", "'numerical' if the vColumn is numerical , 'categorical' otherwise. categorical", "str, x: float = 2): \"\"\" --------------------------------------------------------------------------- Applies a default", "return self.parent one_hot_encode = get_dummies # ---# def head(self, limit:", "a Python library with scikit-like functionality for conducting # data", "DECODE({}, {}, NULL))\".format( \"{}\", by[0], \", \".join( [\"{}, {}\".format(elem[0], elem[1])", "method=\"view\") self.parent.to_db(tmp_view_name) from verticapy.learn.ensemble import ( RandomForestClassifier, RandomForestRegressor, ) drop(tmp_model_name,", "quantile(1-alpha) if 'use_threshold' is set to False else the lower", "dummies. drop_first: bool, optional Drops the first dummy to avoid", ": absolute value acos : trigonometric inverse cosine asin :", "nlargest(self, n: int = 10): \"\"\" --------------------------------------------------------------------------- Returns the n", "to do all of the above. The idea is simple:", "Drops outliers in the vColumn. vDataFrame.outliers : Adds a new", "geopandas plot function. For more information, see: https://geopandas.readthedocs.io/en/latest/docs/reference/api/ geopandas.GeoDataFrame.plot.html Returns", "\"::int\" if (self.parent[numcol].isbool()) else \"\" query, cat = [], self.distinct()", ".replace(\"/\", \"_\") .replace(\",\", \"_\") .replace(\"'\", \"_\") ) expr = \"DECODE({},", "vColumn (delimited by an offset and a limit). Parameters ----------", "Parameters ---------- k: int, optional Number of most occurent elements", "percentile = 0.5) AS 'approx_50%', APPROXIMATE_PERCENTILE ({3}{4} USING PARAMETERS percentile", "unit, [str]), (\"start\", start, [bool]), ] ) start_or_end = \"START\"", "title = \"Computing WOE & IV of {} (response =", "[int, float]), (\"nbins\", nbins, [float, int]), ] ) if by:", "/ {} < {}\".format( self.alias, result[\"avg\"][0], result[\"std\"][0], threshold ) )", "self, breaks: list, labels: list = [], include_lowest: bool =", "= \", \".join(expr) + \", {})\".format(len(distinct_elements)) self.transformations += [(expr, \"int\",", "parameter is empty or invalid. max_cardinality: int/tuple, optional Maximum number", "the vColumn. Parameters ---------- func: str, Function in pure SQL", "of the vColumn {}.\".format(self.alias), method=\"fetchfirstelem\", ) self.parent.__update_catalog__( {\"index\": [\"store_usage\"], self.alias:", "cmax - cmin == 0: warning_message = \"Can not normalize", "Returns the category of the vColumn. The category will be", "order_by, [list]), ] ) method = method.lower() self.parent.are_namecols_in([elem for elem", "= 5): \"\"\" --------------------------------------------------------------------------- Returns the tail of the vColumn.", "[\"zscore\", \"robust_zscore\", \"minmax\"]), (\"by\", by, [list]), (\"return_trans\", return_trans, [bool]), ]", "/ / / # \\/ / / # / /", "Parent of the vColumn. One vDataFrame can have multiple children", "{}, \"spearman\": {}, \"spearmand\": {}, \"kendall\": {}, \"cramer\": {}, \"biserial\":", "vColumn. 
\"\"\" check_types( [ (\"breaks\", breaks, [list]), (\"labels\", labels, [list]),", "--------------------------------------------------------------------------- Fills missing elements in the vColumn with a user-specified", "the threshold instead of the 'alpha' parameter. alpha: float, optional", "vColumn to use to partition the data. bandwidth: float, optional", "self.aggregate([\"kurtosis\"]).values[self.alias][0] kurt = kurtosis # ---# def label_encode(self): \"\"\" ---------------------------------------------------------------------------", "trigonometric cosine cosh : hyperbolic cosine cot : trigonometric cotangent", "+ 5). of: str, optional The vColumn to use to", "mean encoding of a response. \"\"\" import verticapy.stats as st", "Uses the Gaussian distribution to define the outliers. After normalizing", "___\\ /\\ __ \\ /\\ \\ /\\ \\/\\ \\ /\\", "either express or implied. # See the License for the", "self.alias, self.alias, n ) title = \"Reads {} {} largest", ": Returns the vDataFrame expected store usage. \"\"\" pre_comp =", "Regular expression to replace. value: str, optional New value. Returns", "verticapy.utilities import * from verticapy.toolbox import * from verticapy.errors import", "absolute value acos : trigonometric inverse cosine asin : trigonometric", "\"SELECT {} FROM (SELECT {} AS {}, {} AS verticapy_agg", "column in order_by]) new_column = \"COALESCE({}, LAST_VALUE({} IGNORE NULLS) OVER", "10\".format( self.parent.__genSQL__(force_columns=force_columns) ), print_time_sql=False, ) self.parent._VERTICAPY_VARIABLES_[\"columns\"].remove(self.alias) delattr(self.parent, self.alias) except: self.parent._VERTICAPY_VARIABLES_[\"exclude_columns\"]", "nbins: int, optional Number of bins used for the discretization", "def isbool(self): \"\"\" --------------------------------------------------------------------------- Returns True if the vColumn is", "**style_kwds ) model.drop() return result except: model.drop() raise # ---#", "vColumn based on an aggregation. \"\"\" if isinstance(pie_type, str): pie_type", "if (dropna) else \"\" query = \"SELECT {} AS {},", "normalize by grouping by elements, please use a method in", "descriptive statistics. \"\"\" check_types([(\"k\", k, [int, float]), (\"dropna\", dropna, [bool])])", "'{}')\".format( \"{}\", length, unit.upper(), start_or_end ) ) # ---# def", "\"\"), func.replace(\"{}\", \"x\"), ) ) return self.parent except Exception as", "sublist] # ---# def div(self, x: float): \"\"\" --------------------------------------------------------------------------- Divides", "get_data_types( \"SELECT {} AS apply_test_feature FROM {} WHERE {} IS", "assert len(breaks) >= 2, ParameterError( \"Length of parameter 'breaks' must", "\"{}\", x) return self.apply(func=expr) # ---# def astype(self, dtype: str):", "return self.apply( func=\"TIME_SLICE({}, {}, '{}', '{}')\".format( \"{}\", length, unit.upper(), start_or_end", "elements.\".format(self.alias, n) return to_tablesample(query, title=title) # ---# def normalize( self,", "def describe( self, method: str = \"auto\", max_cardinality: int =", "head of the vColumn. 
vDataFrame[].tail : Returns the tail of", "woe, CASE WHEN non_events = 0 OR events = 0", "str, optional The name of the merging category when using", "successfully done.\") return self.parent # ---# def median( self, approx:", "def __getitem__(self, index): if isinstance(index, slice): assert index.step in (1,", "= aggregate # ---# def apply(self, func: str, copy_name: str", "= RandomForestRegressor(tmp_model_name) else: model = RandomForestClassifier(tmp_model_name) model.set_params({\"n_estimators\": 20, \"max_depth\": 8,", "discretization. topk : Keeps the topk most frequent categories and", "---# def slice(self, length: int, unit: str = \"second\", start:", "the outliers in the vColumn. vDataFrame.outliers : Adds a new", "trigonometric cotangent exp : exponential function floor : value down", "i] THEN argv[2 * i + 1] ... ELSE argv[n]", "!= None): return pre_comp assert n >= 1, ParameterError(\"Parameter 'n'", "---------- func: str Function to use to transform the vColumn.", "return pre_comp store_usage = executeSQL( \"SELECT ZEROIFNULL(SUM(LENGTH({}::varchar))) FROM {}\".format( bin_spatial_to_str(self.category(),", "max, median, unique... depending on the input method. Parameters ----------", "= [elem for elem in self.transformations] total = self.count() if", "# Special Methods # # ---# def __init__( self, alias:", "'categorical' otherwise. categorical : Uses only categorical aggregations during the", "] ax.set_title(\"KernelDensity\") ax.legend( custom_lines, columns, title=by, loc=\"center left\", bbox_to_anchor=[1, 0.5],", "= max( float(vColumn_max - vColumn_min) / int(math.floor(math.log(count, 2) + 2)),", "vDataFrame.donut : Draws the donut chart of the vColumn based", "was renamed '{}'.\".format(old_name, new_name) ) return parent # ---# def", "6, numcol: str = \"\" ): \"\"\" --------------------------------------------------------------------------- Aggregates the", "------- list Distinct caterogies of the vColumn. See Also --------", "use for the discretization. topk : Keeps the topk most", "] ), ) stddev = \"DECODE({}, {}, NULL)\".format( by[0], \",", "= \"<\", \"]\" if labels: label = labels[idx] else: label", "vColumn. The category will be one of the following: date", "method == \"0ifnull\": category, ctype = \"int\", \"bool\" else: category,", "skip. Returns ------- tablesample An object containing the result. For", "Returns ------- int vColumn expected store usage. See Also --------", "will be gathered together to create a new category :", "will be transformed. Parameters ---------- pat: str regular expression. Returns", "({}) x ORDER BY ord\".format( self.alias, query, ) title =", "{} WHERE {} IS NOT NULL GROUP BY 1) x", "updated when the parent vDataFrame is modified. Attributes ---------- alias,", "ts = self.parent.format_colnames(ts) from verticapy.plot import range_curve_vdf return range_curve_vdf( self,", "topk : Keeps the topk most frequent categories and merge", "check = True if len(args) > 0: column = args[0]", "(Time Series) vColumn to use to order the data. The", "\"silverman\"]), (\"bandwidth\", bandwidth, [int, float]), (\"nbins\", nbins, [float, int]), ]", "] ) method = method.lower() if self.isnum() and method ==", "import hist return hist(self, method, of, max_cardinality, nbins, h, ax=ax,", "else \"\" ) order_by_ts = \", \".join([quote_ident(column) + desc for", "sem(self): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'sem' (standard error", "will be created using the input Name. 
Returns ------- vDataFrame", "if self.category() == \"int\": best_h = max(math.floor(best_h), 1) return best_h", "== 0: warning_message = \"Can not normalize {} using a", "can drastically decrease. Returns ------- float quantile (or approximate quantile).", "vColumns. \"\"\" if isinstance(cat_priority, str) or not (isinstance(cat_priority, Iterable)): cat_priority", "optional If set to True, draw a Step Plot. ax:", "result[6], result[7], ) elif self.isdate(): min_date = self.min() table =", "y, self.parent.__genSQL__(), ) query = \"SELECT {}, MIN(ord) AS ord,", "the method 'smart'.\" ) assert response, ParameterError( \"Parameter 'response' can", "\"approx_75%\", \"max\", ] if method != \"cat_stats\": values = {", "min_date = self.min() table = \"(SELECT DATEDIFF('second', '{}'::timestamp, {}) AS", ": Creates a new feature by evaluating some conditions. vDataFrame[].discretize", "verticapy_agg DESC\".format( self.alias, bin_spatial_to_str(self.category(), self.alias), self.alias, kwargs[\"agg\"], self.parent.__genSQL__(), self.alias, )", "(isinstance(cat_priority, Iterable)): cat_priority = [cat_priority] check_types( [ (\"by\", by, [str]),", "NULL, 0, 1)\" elif method in (\"mean\", \"avg\", \"median\"): fun", "best_h = sturges elif method.lower() in (\"freedman_diaconis\", \"fd\"): best_h =", "was \" if verticapy.options[\"print_info\"]: print(\"{} element{}filled.\".format(total, conj)) self.parent.__add_to_history__( \"[Fillna]: {}", "1] Returns ------- float optimal bar width. \"\"\" check_types( [(\"method\",", "vDataFrame input aggregations. \"\"\" return self.aggregate([\"skewness\"]).values[self.alias][0] skew = skewness #", "by: list, optional vColumns used in the partition. order_by: list,", "different vColumn categories. Parameters ---------- response: str Response vColumn. Returns", "!= None else \"NULL\", ) for elem in result if", "by = self.parent.format_colnames(by) if method == \"auto\": method = \"mean\"", "for the numerical and Mode for the categorical vColumns. bfill", "optional vColumns used in the partition. order_by: list, optional List", "category. See Also -------- vDataFrame[].ctype : Returns the vColumn database", "self.count() > 0 # ---# def __repr__(self): return self.head(limit=verticapy.options[\"max_rows\"]).__repr__() #", "LIMIT 0\".format( func.replace(\"{}\", self.alias), self.parent.__genSQL__(), self.alias, ), \"apply_test_feature\", ) category", "val != None: new_column = \"COALESCE({}, '{}')\".format(\"{}\", val) elif expr:", "== \"donut\" else False rose = True if pie_type ==", "label = f\"{close_l}{first_elem};{second_elem}{close_r}\" conditions += [ f\"'{first_elem}' {op1} {column} AND", "\"The column 'numcol' must be numerical\" ) cast = \"::int\"", "Sigmoid kernel. silverman : Silverman kernel. nbins: int, optional Maximum", "processes with its abstractions. Parameters ---------- alias: str vColumn alias.", "use when the parameter method is set to 'cat_stats'. Returns", "median and two input quantiles. Parameters ---------- ts: str TS", ") tail.count = self.parent.shape()[0] tail.offset = offset tail.dtype[self.alias] = self.ctype()", "|____/ / / # _____________ / / # \\ /", "## # # __ __ ______ ______ __ __ __", "the other types to varchar. 
same_freq : Computes bins with", "robust_zscore : Normalization using the Robust Z-Score (median and mad).", "if it is part of the DB version you are", "data\" ) desc = \"\" if (method in (\"ffill\", \"pad\"))", "dropna = \" WHERE {} IS NOT NULL\".format(self.alias) if (dropna)", "verticapy.plot import ts_plot return ts_plot( self, ts, by, start_date, end_date,", "/ nbins if h > 0.01: h = round(h, 2)", "not normalize {} using a Robust Z-Score - The MAD", "op1, op2, close_l, close_r = \"<=\", \"<\", \"[\", \"[\" if", "list): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using the input functions.", "if the vColumn type is date like). The vColumn will", "Adds a copy vColumn to the parent vDataFrame. Parameters ----------", "will be sliced using the floor of the slicing instead", "the input vColumn based on an aggregation. Parameters ---------- by:", "greater than the threshold, it will be considered as an", "std(self): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'std' (Standard Deviation).", "if method == \"robust_zscore\": self.catalog[\"median\"] = 0 self.catalog[\"mad\"] = 1", "4.0, use_threshold: bool = True, alpha: float = 0.05 ):", "# ---# def astype(self, dtype: str): \"\"\" --------------------------------------------------------------------------- Converts the", ") else: if verticapy.options[\"print_info\"]: print(\"Nothing was filled.\") self.transformations = [elem", "advanced analytical function on a specific vColumn. \"\"\" return self.parent.aggregate(func=func,", "offset, [int, float])]) if offset < 0: offset = max(0,", "category is date. vDataFrame[].isnum : Returns True if the vColumn", "new_category.replace(\"'\", \"''\"), ), \"varchar\", \"text\", ) elif self.isnum() and method", "int(self.count()) # ---# def __nonzero__(self): return self.count() > 0 #", "______ / / # \\ / / / # \\", ") if self.isnum(): result = ( self.parent.describe( method=\"numerical\", columns=[self.alias], unique=False", "cmax = \"DECODE({}, {}, NULL)\".format( by[0], \", \".join( [ \"{},", "= 1 if drop_first else 0 for k in range(len(distinct_elements)", "for testing purpose. Returns ------- vDataFrame self.parent See Also --------", "isinstance(func, str_sql): func = str(func) check_types([(\"func\", func, [str]), (\"copy_name\", copy_name,", "FROM {} LIMIT 1\".format( alpha, self.alias, alpha, self.alias, self.parent.__genSQL__() )", "ax Matplotlib axes object See Also -------- vDataFrame.boxplot : Draws", "1)) floor_end = -1 if (self.category() == \"int\") else \"\"", "method is 'cat_stats'\" ) distinct_count, is_numeric, is_date = ( self.nunique(),", "), title=\"Computing the mode.\", method=\"fetchall\", ) top = None if", "AS {} FROM {}) VERTICAPY_OPTIMAL_H_TABLE\".format( min_date, self.alias, self.alias, self.parent.__genSQL__() )", "Series) vColumn to use to order the data. The vColumn", "for numerical vColumns, cast the other types to varchar. same_freq", "verticapy.plot import spider as spider_plot return spider_plot( self.parent, columns, method,", "to the parent vDataFrame. Parameters ---------- name: str Name of", "ctype = self.category(), self.ctype() copy_trans = [elem for elem in", "'{}', 1, 0)\".format( \"{}\", str(distinct_elements[k]).replace(\"'\", \"''\") ) transformations = self.transformations", "use this file except in compliance with the License. 
#", ") if return_trans: return \"({} - {}) / {}({})\".format( self.alias,", "{}, \"kendall\": {}, \"cramer\": {}, \"biserial\": {}, \"regr_avgx\": {}, \"regr_avgy\":", "= \"\", by: list = [], order_by: list = [],", "category of the vColumn. The category will be one of", "aggregations to gain in performance. The catalog will be updated", "import boxplot return boxplot(self, by, h, max_cardinality, cat_priority, ax=ax, **style_kwds)", "the vDataFrame. Dropping a vColumn means simply not selecting it", "to display. offset: int, optional Number of elements to skip.", "int, step: int): \"\"\" --------------------------------------------------------------------------- Slices the vColumn. The vColumn", "error of mean). Returns ------- float sem See Also --------", "of the learning and scoring phases. xlim: tuple, optional Set", "of the search. Parameters ---------- val: list List of the", "object which that stores all user transformations. If the vDataFrame", "outliers by their respective average. null : Replaces the outliers", "all_partition = by if method in [\"ffill\", \"pad\", \"bfill\", \"backfill\"]:", ": Verifies if the regular expression is in each of", "alias, [elem for elem in transformations], ) self.catalog = {", "sys.getsizeof(self.catalog) ) for elem in self.catalog: total += sys.getsizeof(elem) return", "See Also -------- vDataFrame.iv_woe : Computes the Information Value (IV)", "({})\".format(\"{}\", x)) # ---# def sum(self): \"\"\" --------------------------------------------------------------------------- Aggregates the", "elem[1]) for elem in result] ), ) executeSQL( \"SELECT {}", "Interpolation). mean : Average. median : median. mode : mode", "\"int\", \"int\")] self.parent.__update_catalog__(erase=True, columns=[self.alias]) self.catalog[\"count\"] = self.parent.shape()[0] self.catalog[\"percent\"] = 100", "the donut chart of the vColumn based on an aggregation.", "encoding of a response. \"\"\" import verticapy.stats as st return", "Maximum number of distinct elements for vColumns 1 and 2", "------- ax Matplotlib axes object See Also -------- vDataFrame.boxplot :", "END)\".format(lower_when, upper_when, \"{}\") self.apply(func=func) return self.parent # ---# def count(self):", "TIME ZONE / TIMEZONE_HOUR / TIMEZONE_MINUTE / WEEK / YEAR", "0 and 1 that represents the quantile. For example: 0.25", "threshold. Values lesser than quantile(alpha) or greater than quantile(1-alpha) will", "\"::float\" if self.category() == \"float\" else \"\" if index <", "to use to partition the data. h: float, optional Interval", "print_time_sql=False, ) except: cmax, cmin = ( \"MAX({}) OVER (PARTITION", "1)\" elif method in (\"mean\", \"avg\", \"median\"): fun = \"MEDIAN\"", "vColumn_025, vColumn_075, vColumn_max = ( result[0], result[3], result[4], result[6], result[7],", "{name}.\\nBy changing the parameter 'name', you'll be able to solve", "numcol, [str]), ] ) method = method.lower() assert (method !=", "f\"{close_l}{first_elem};{second_elem}{close_r}\" conditions += [ f\"'{first_elem}' {op1} {column} AND {column} {op2}", "FROM ({}) x ORDER BY ord\".format( self.alias, query, ) title", "Multiplies the vColumn by the input element. Parameters ---------- x:", "return to_tablesample(query, title=title) # ---# def normalize( self, method: str", "If set to True, the information will be stored in", "the regular expression in each record of the vColumn. vDataFrame[].str_replace", "categorical (No h will be picked or computed) h: float,", ": median. mode : mode (most occurent element). 
0ifnull :", "if the vColumn category is date, False otherwise. Returns -------", "str, optional The type of pie chart. auto : Regular", "= self.parent.format_colnames(response) drop(tmp_view_name, method=\"view\") self.parent.to_db(tmp_view_name) from verticapy.learn.ensemble import ( RandomForestClassifier,", "if cmax - cmin == 0: warning_message = \"Can not", "re, decimal, warnings, datetime from collections.abc import Iterable from typing", "the first element (Constant Interpolation). mean : Average. median :", "int): \"\"\" --------------------------------------------------------------------------- Slices the vColumn. The vColumn will be", "on the vColumn \" \"or simply because of ambiguous columns", "= '{}', tree_id = {}, format = 'tabular'))\".format( tmp_model_name, i", "response, [str]), (\"nbins\", nbins, [int, float]), ( \"method\", method, [\"auto\",", "\"\"\" check_types([(\"start\", start, [int, float]), (\"step\", step, [int, float])]) return", "able to solve this issue.\" ) new_vColumn = vColumn( name,", "used for testing purpose. Returns ------- vDataFrame self.parent See Also", "of a numerical column for each vColumn category. In this", "idx in range(len(breaks) - 1): first_elem, second_elem = breaks[idx], breaks[idx", "the data. The vColumn type must be date like (date,", "code generation will be slower if the vDataFrame has been", "\"\"\" return self.aggregate([\"sem\"]).values[self.alias][0] # ---# def skewness(self): \"\"\" --------------------------------------------------------------------------- Aggregates", "the k most occurent elements and their distributions as percents.", "the quantile. For example: 0.25 represents Q1. approx: bool, optional", "result.values[self.alias] elif (method == \"cat_stats\") and (numcol != \"\"): numcol", "_ |. # |_)\\/ |_)(_|(_|| \\_/|_|(_||| # / # ____________", "Encodes the vColumn with Label Encoding. vDataFrame[].get_dummies : Encodes the", "best splits when 'method' is set to 'smart'. A RF", "\"[\" if idx == 0 and include_lowest: op1, close_l =", "1 and 2 bars. It is only valid if the", "**style_kwds, ): \"\"\" --------------------------------------------------------------------------- Draws the vColumn Density Plot. Parameters", "vColumn. \"\"\" check_types([(\"x\", x, [int, float])]) return self.apply(func=\"{} * ({})\".format(\"{}\",", "Aggregates the vColumn using 'unique' (cardinality). Parameters ---------- approx: bool,", "range(max_floor): self.transformations += [(\"{}\", self.ctype(), self.category())] self.transformations += [(func, ctype,", "[0], [0], color=updated_dict(param, style_kwds, idx)[\"color\"], lw=4, ), ] ax.set_title(\"KernelDensity\") ax.legend(", "# \\/ (/_| | |(_(_|| \\/ # / # VerticaPy", "int, optional Maximum number of nbins used for the discretization", "# ---# def median( self, approx: bool = True, ):", "representing the outliers threshold. Values lesser than quantile(alpha) or greater", "to cut the vColumn. 
labels: list, optional Labels used to", "\"approx_25%\", \"approx_50%\", \"approx_75%\", \"max\", ] if method != \"cat_stats\": values", "] ) if func not in (\"log\", \"mod\", \"pow\", \"round\"):", "'tabular'))\".format( tmp_model_name, i ) for i in range(parameters[\"n_estimators\"]) ] query", "= 1): \"\"\" --------------------------------------------------------------------------- Returns the nth most occurent element.", "datetime.date] = \"\", plot_median: bool = False, ax=None, **style_kwds, ):", "\"\"\" return self.aggregate([\"stddev\"]).values[self.alias][0] stddev = std # ---# def store_usage(self):", "[ \"auto\", \"mode\", \"0ifnull\", \"mean\", \"avg\", \"median\", \"ffill\", \"pad\", \"bfill\",", "sort the data when using TS methods. Returns ------- vDataFrame", "prefix, [str]), (\"prefix_sep\", prefix_sep, [str]), (\"drop_first\", drop_first, [bool]), (\"use_numbers_as_suffix\", use_numbers_as_suffix,", "trans += \" ELSE NULL END)\" trans = (trans, \"varchar\",", "result. For more information, see utilities.tablesample. See Also -------- vDataFrame[].describe", "of that relation. vColumns simplify several processes with its abstractions.", "self.parent.__add_to_history__( \"[Drop]: vColumn {} was deleted from the vDataFrame.\".format(self.alias) )", "transformed multiple times, so it's better practice to use this", "print_time_sql=False, ) except: new_column = \"COALESCE({}, {}({}) OVER (PARTITION BY", ") ) return self.parent # ---# def aggregate(self, func: list):", "of the vColumn. Parameters ---------- ts: str TS (Time Series)", "distinct_elements = self.distinct() if distinct_elements not in ([0, 1], [1,", "[bool]), ] ) self.parent.are_namecols_in(ts) ts = self.parent.format_colnames(ts) if by: self.parent.are_namecols_in(by)", "in catalog: self.catalog[elem] = catalog[elem] # ---# def __getitem__(self, index):", "kernel.lower() from verticapy.learn.neighbors import KernelDensity schema = verticapy.options[\"temp_schema\"] if not", "= {\"color\": colors[idx % len(colors)]} ax = self.parent.search( \"{} =", "= [] columns = self.parent[by].distinct() for idx, column in enumerate(columns):", ": kth most occurent element density unique : cardinality (count", "float(values[elem][i]) return tablesample(values) # ---# def discretize( self, method: str", "= \"winsorize\", threshold: float = 4.0, use_threshold: bool = True,", "first dummy to avoid the creation of correlated features. use_numbers_as_suffix:", "list = [], include_lowest: bool = True, right: bool =", "threshold instead of the 'alpha' parameter. alpha: float, optional Number", ": 0 when the vColumn is null, 1 otherwise. expr:", "the Plot. start_date: str / date, optional Input Start Date.", "k) ] text_info += \"\\t{} => {}\".format(distinct_elements[k], k) expr =", "{} using a Z-Score - The Standard Deviation is null", "(\"value\", value, [str])]) return self.apply( func=\"REGEXP_REPLACE({}, '{}', '{}')\".format( \"{}\", to_replace.replace(\"'\",", "of most occurent elements to return. 
dropna: bool, optional If", "self.alias), self.parent.__genSQL__(), ), title=\"Computing the Store Usage of the vColumn", "bin_spatial_to_str(self.category(), self.alias), self.alias, self.parent.__genSQL__(), self.parent.__get_last_order_by__(), limit, offset, ), title=title, )", "< 50): try: if fun == \"MEDIAN\": fun = \"APPROXIMATE_MEDIAN\"", "+= \"WHEN {} BETWEEN {} AND {} THEN '[{};{}]' \".format(", "*argv): \"\"\" --------------------------------------------------------------------------- Encodes the vColumn using a user-defined encoding.", "\"_\")) assert name.replace('\"', \"\"), EmptyParameter( \"The parameter 'name' must not", "return int(self.count()) # ---# def __nonzero__(self): return self.count() > 0", "for elem in self.catalog: sauv[elem] = self.catalog[elem] self.parent.__update_catalog__(erase=True, columns=[self.alias]) total", "= '{}'\".format( bin_spatial_to_str(self.category(), self.alias), category, ) ) query += [lp", "({}) x\".format( self.alias, query, ) query = \"SELECT {} AS", "plot on. **style_kwds Any optional parameter to pass to the", "approximate quantile is returned. By setting this parameter to False,", "> {} THEN {} \".format(\"{}\", upper, upper) if (isinstance(upper, (float,", "'x -> {}'.\".format( copy_name.replace('\"', \"\"), func.replace(\"{}\", \"x\"), ) ) else:", "lw=4, ), ] ax.set_title(\"KernelDensity\") ax.legend( custom_lines, columns, title=by, loc=\"center left\",", "---# def quantile(self, x: float, approx: bool = True): \"\"\"", "the vColumn {}.\".format(self.alias), method=\"fetchfirstelem\", ) self.parent.__update_catalog__( {\"index\": [\"store_usage\"], self.alias: [store_usage]}", "type date like. Optimized h will be computed if the", "solve this issue.\" ) new_vColumn = vColumn( name, parent=self.parent, transformations=[item", "float): \"\"\" --------------------------------------------------------------------------- Subtracts the input element from the vColumn.", "(elem[1] == None) else str(elem[1]) new_column = \"COALESCE({}, DECODE({}, {},", "to use to compute the optimal h. auto : Combination", "be greater or equal to 1\") where = \" WHERE", "schema = \"public\" name = gen_tmp_name(schema=schema, name=\"kde\") if isinstance(xlim, (tuple,", "except: pass self.parent.__add_to_history__( \"[Discretize]: The vColumn {} was discretized.\".format(self.alias) )", "WHERE {} IS NOT NULL\".format(self.alias) if (dropna) else \"\" query", "sauv: self.catalog[elem] = sauv[elem] return self.parent # ---# def geo_plot(self,", "aggregations. \"\"\" check_types([(\"dropna\", dropna, [bool]), (\"n\", n, [int, float])]) if", "columns = self.parent[by].distinct() for idx, column in enumerate(columns): param =", "0.5], ) ax.set_xlabel(self.alias) return ax kernel = kernel.lower() from verticapy.learn.neighbors", "\".join( [ \"'{}'\".format(str(elem).replace(\"'\", \"''\")) for elem in distinct ] ),", "filled.\") self.transformations = [elem for elem in copy_trans] for elem", "Normalization using the Robust Z-Score (median and mad). (x -", "numerical. q: tuple, optional Tuple including the 2 quantiles used", "will be dropped. 
Returns ------- vDataFrame self.parent See Also --------", "[bool]), (\"step\", step, [bool]), ] ) self.parent.are_namecols_in(ts) ts = self.parent.format_colnames(ts)", "NameError( f\"A vColumn has already the alias {new_name}.\\nBy changing the", "all_cols: try: if (quote_ident(column) in func) or ( re.search( re.compile(\"\\\\b{}\\\\b\".format(column.replace('\"',", "self.parent.are_namecols_in(of) of = self.parent.format_colnames(of) from verticapy.plot import bar return bar(self,", "query = \"SELECT {} AS index, non_events, events, pt_non_events, pt_events,", "= 0, ax=None, **style_kwds, ): \"\"\" --------------------------------------------------------------------------- Draws the histogram", "do all of the above. The idea is simple: instead", "of digits to keep after the comma. Returns ------- vDataFrame", "IS NOT NULL) VERTICAPY_SUBTABLE {}\".format( self.alias, self.alias, self.alias, self.parent.__genSQL__(), self.alias,", "PARAMETERS percentile = 0.25) AS 'approx_25%', APPROXIMATE_PERCENTILE ({3}{4} USING PARAMETERS", "(\"return_enum_trans\", return_enum_trans, [bool]), ] ) method = method.lower() if self.isnum()", "Encoding. \"\"\" check_types([(\"response\", response, [str])]) self.parent.are_namecols_in(response) response = self.parent.format_colnames(response) assert", "similar to the following: (function, type, category) parent: vDataFrame, optional", "if mad != 0: if return_trans: return \"({} - {})", "{} THEN {} \".format(\"{}\", upper, upper) if (isinstance(upper, (float, int)))", "\"varchar\", \"text\") else: trans = (\"{} || ''\", \"varchar\", \"text\")", "vColumn. See Also -------- vDataFrame.topk : Returns the vColumn most", "Also -------- vDataFrame[].nlargest : Returns the n largest vColumn elements.", "using 'count' (Number of non-Missing elements). Returns ------- int number", "self.isdate(): return self.apply(func=\"TIMESTAMPADD(SECOND, {}, {})\".format(x, \"{}\")) else: return self.apply(func=\"{} +", "y = self.parent.format_colnames(y) assert self.parent[y].nunique() == 2, TypeError( \"vColumn {}", "filled.\".format( self.alias ) warnings.warn(warning_message, Warning) return self.parent if isinstance(val, str):", "query = \"SELECT {} AS {}, {} AS ord, {}::int", "store_usage(self): \"\"\" --------------------------------------------------------------------------- Returns the vColumn expected store usage (unit:", "Line2D colors = gen_colors() if not xlim: xmin = self.min()", "pie_type.lower() check_types( [ (\"method\", method, [str]), (\"of\", of, [str]), (\"max_cardinality\",", "the regular expression in each record of the vColumn. vDataFrame[].str_slice", "\"{}\", fun, \"{}\", \", \".join(by) ) else: new_column = \"COALESCE({},", "vColumn when method is set to 'smart'. return_enum_trans: bool, optional", "to identify outliers. After normalizing the data (Z-Score), if the", "def pie( self, method: str = \"density\", of: str =", "[elem for elem in transformations], ) self.catalog = { \"cov\":", "trained if the response is numerical (except ints and bools),", "represents the entire relation, a vColumn can be seen as", "# ---# def mode(self, dropna: bool = False, n: int", "+ ({})\".format(\"{}\", x)) # ---# def add_copy(self, name: str): \"\"\"", "must be binary to use iv_woe.\".format(y) ) self.parent[y].distinct() trans =", "Label Encoding. vDataFrame[].get_dummies : Encodes the vColumn with One-Hot Encoding.", "__ __ ______ ______ __ __ __ __ __ __", "______ ______ __ __ __ __ __ __ __ #", "the vDataFrame. 
\"\"\" if isinstance(val, str) or not (isinstance(val, Iterable)):", "\".join(by) ) else: new_column = \"COALESCE({}, {}({}) OVER (PARTITION BY", "in enumerate(columns): param = {\"color\": colors[idx % len(colors)]} ax =", "True if the vColumn category is date, False otherwise. Returns", "None ] ), ) stddev = \"DECODE({}, {}, NULL)\".format( by[0],", "vColumn = argv[2 * i] THEN argv[2 * i +", "one element to use to order the data\" ) desc", "the entire relation, a vColumn can be seen as one", "= result sturges = max( float(vColumn_max - vColumn_min) / int(math.floor(math.log(count,", "(n == 1) else str(int(n)) if isinstance(top, decimal.Decimal): top =", "COUNT(*) AS _verticapy_cnt_, 100 * COUNT(*) / {} AS percent", "[bool]), (\"use_numbers_as_suffix\", use_numbers_as_suffix, [bool]), ] ) distinct_elements = self.distinct() if", "Aggregates the vColumn using 'product'. Returns ------- float product See", "store usage (unit: b). Returns ------- int vColumn expected store", "else: cmax, cmin = ( \"MAX({}) OVER (PARTITION BY {})\".format(", "slicing. step: int Size of the slicing. Returns ------- vDataFrame", "threshold: float = 4.0, use_threshold: bool = True, alpha: float", "self.alias), category, ) ) query += [lp + tmp_query +", "method != \"robust_zscore\": max_floor = 0 for elem in by:", "\"NULLIFZERO\" if (nullifzero) else \"\", cmax, cmin, ), \"float\", \"float\",", "assert (method != \"cat_stats\") or (numcol), ParameterError( \"The parameter 'numcol'", "1993 the 3rd. area: bool, optional If set to True,", "- sauv[\"min\"]) / ( sauv[\"max\"] - sauv[\"min\"] ) except: pass", "length, [int, float]), (\"unit\", unit, [str]), (\"start\", start, [bool]), ]", "\"\"\" return self.apply(func=\"DATE_PART('{}', {})\".format(field, \"{}\")) # ---# def decode(self, *argv):", "division operation pow : number raised to the power of", "self.parent.format_colnames(of) from verticapy.plot import spider as spider_plot return spider_plot( self.parent,", "\"Parameter 'nbins' must be greater or equals to 2 in", "the vColumn records. vDataFrame[].str_count : Computes the number of matches", "{0} IS NOT NULL GROUP BY {0} ORDER BY COUNT(*)\"", "index_start, limit, ) return vDataFrameSQL(query) elif isinstance(index, int): cast =", "Also -------- vDataFrame.eval : Evaluates a customized expression. \"\"\" check_types([(\"name\",", "create a new category : 'Others'. cat_priority: list, optional List", "import pie return pie( self, method, of, max_cardinality, h, donut,", "= False, step: bool = False, ax=None, **style_kwds, ): \"\"\"", "with the same number of elements. same_width : Computes regular", "______ # / __ `\\ / / # | \\/", "{}, \"biserial\": {}, \"regr_avgx\": {}, \"regr_avgy\": {}, \"regr_count\": {}, \"regr_intercept\":", "of: self.parent.are_namecols_in(of) of = self.parent.format_colnames(of) from verticapy.plot import hist return", "limit: int, optional Number of elements to display. 
offset: int,", "int = 6, nbins: int = 0, h: float =", "\"''\")) ) # ---# def str_extract(self, pat: str): \"\"\" ---------------------------------------------------------------------------", "check_types([(\"pat\", pat, [str])]) return self.apply( func=\"REGEXP_COUNT({}, '{}')\".format(\"{}\", pat.replace(\"'\", \"''\")) )", "[int, float]), (\"step\", step, [int, float])]) return self.apply(func=\"SUBSTR({}, {}, {})\".format(\"{}\",", "nth_elems + [str(count)]) ) query = \"SELECT {} FROM (SELECT", "return self.parent.isin(val) # ---# def isnum(self): \"\"\" --------------------------------------------------------------------------- Returns True", "the vColumn records. vDataFrame[].extract : Extracts the regular expression in", "/ # \\/ # _ # \\ / _ __|_.", "SQL code. Note: Dropping a vColumn can make the vDataFrame", "+= \" ELSE NULL END)\" trans = (trans, \"varchar\", \"text\")", "will be transformed. Parameters ---------- field: str The field to", "(\"method\", method, [\"zscore\", \"robust_zscore\", \"minmax\"]), (\"by\", by, [list]), (\"return_trans\", return_trans,", "Chart of vColumn based on an aggregation. \"\"\" check_types( [", "copy of the vColumn. \"\"\" check_types([(\"new_name\", new_name, [str])]) old_name =", ") except: pass if method == \"robust_zscore\": self.catalog[\"median\"] = 0", "in the vColumn with a user-specified rule. Parameters ---------- val:", "* {} + {}{}) || ']'\".format( \"{}\", h, h, \"{}\",", "self.parent.__genSQL__(force_columns=force_columns) ), print_time_sql=False, ) self.parent._VERTICAPY_VARIABLES_[\"columns\"].remove(self.alias) delattr(self.parent, self.alias) except: self.parent._VERTICAPY_VARIABLES_[\"exclude_columns\"] +=", "THEN {} ELSE {} END)\".format( \"{}\", p_alpha, mean_alpha, \"{}\", p_1_alpha,", "by) by = self.parent.format_colnames(by) if method == \"auto\": method =", "the method will return the transformation used instead of the", "= 1, len(by) if self.isbool(): warning_message = \"Normalize doesn't work", "average. null : Replaces the outliers by the NULL value.", "aggregations to compute the optimal h.\", method=\"fetchrow\", ) count, vColumn_min,", "or mod), 'x' represents the second argument. Returns ------- vDataFrame", "offset tail.dtype[self.alias] = self.ctype() tail.name = self.alias return tail #", "self.parent.filter( \"ABS({} - {}) / {} < {}\".format( self.alias, result[\"avg\"][0],", "== \"float\" else \"\" if index < 0: index +=", "vDataFrame : Parent of the vColumn. transformations, str : List", "mean : average median : median min : minimum mode", "GROUP BY 1) x ORDER BY verticapy_agg DESC\".format( self.alias, bin_spatial_to_str(self.category(),", "p_alpha, p_1_alpha = ( self.parent.quantile([alpha, 1 - alpha], [self.alias]) .transpose()", "{}, response: str = \"\", return_enum_trans: bool = False, ):", "corresponding to the offset. 
For example, if n = 1", "AS ord, {}::int AS {} FROM {}\".format( trans, self.alias, self.alias,", "total > 0: try: if \"count\" in sauv: self.catalog[\"count\"] =", "self.alias, max_cardinality ) if distinct_count > max_cardinality: query += (", ") for elem in sauv: if \"top\" in elem: if", "if val != None: new_column = \"COALESCE({}, '{}')\".format(\"{}\", val) elif", "in range(max_floor): self.parent[copy_name].transformations += [ (\"{}\", self.ctype(), self.category()) ] self.parent[copy_name].transformations", "n ) title = \"Reads {} {} smallest elements.\".format(n, self.alias)", "The vColumn {} was discretized.\".format(self.alias) ) return self.parent # ---#", "vDataFrame self.parent See Also -------- vDataFrame[].drop_outliers : Drops outliers in", "final_transformation = [ ( \"({} - {}) / ({})\".format(\"{}\", med,", "trans = (\"FLOOR({}) || ''\", \"varchar\", \"text\") else: trans =", "advantage Vertica’s # speed and built-in analytics and machine learning", "int = 8, cat_priority: list = [], ax=None, **style_kwds, ):", "the intervals should be closed. If set to True, the", "\\/_____/ \\/_____/ \\/_____/ \\/_____/ \\/_/ \\/_/ \\/_/ \\/_/ # #", "= \"auto\", max_cardinality: int = 6, numcol: str = \"\"", ") .transpose() .values[self.alias] ) result = [distinct_count] + result index", "return self.parent # ---# def distinct(self, **kwargs): \"\"\" --------------------------------------------------------------------------- Returns", "= kernel.lower() from verticapy.learn.neighbors import KernelDensity schema = verticapy.options[\"temp_schema\"] if", "= self.parent.__get_catalog_value__(self.alias, \"numh\") if pre_comp != \"VERTICAPY_NOT_PRECOMPUTED\": return pre_comp assert", "greater than November 1993 the 3rd. plot_median: bool, optional If", "or numerical. q: tuple, optional Tuple including the 2 quantiles", ") ) # ---# def avg(self): \"\"\" --------------------------------------------------------------------------- Aggregates the", "Returns ------- ax Matplotlib axes object See Also -------- vDataFrame[].hist", "the histogram of the vColumn based on an aggregation. \"\"\"", "val=None, method: str = \"auto\", expr: str = \"\", by:", "query=query, title=\"Computing the equal frequency histogram bins.\", method=\"fetchall\", ) result", "True): \"\"\" --------------------------------------------------------------------------- Slices and transforms the vColumn using a", "interquartile range kurtosis : kurtosis jb : Jarque-Bera index mad", "__ \\ /\\ \\ /\\ \\/\\ \\ /\\ \"-./ \\", "= self.aggregate(func=[\"std\", \"avg\"]).transpose().values self.parent.filter( \"ABS({} - {}) / {} <", "upper, [float, int])]) assert (lower != None) or (upper !=", "able to solve this issue.\" ) self.add_copy(new_name) parent = self.drop(add_history=False)", "\"_\") + prefix_sep.replace('\"', \"_\") ) n = 1 if drop_first", "the vColumn. \"\"\" check_types([(\"pat\", pat, [str])]) return self.apply( func=\"REGEXP_COUNT({}, '{}')", "dict, optional Catalog where each key corresponds to an aggregation.", "Aggregates the vColumn using the input functions. Parameters ---------- func:", "q% : q Quantile of the vColumn 'of' (ex: 50%", "case of discretization using the method 'topk'\" ) distinct =", "was normalized with the method '{}'.\".format( self.alias, method ) )", "zscore : Normalization using the Z-Score (avg and std). 
(x", "WHERE {} = '{}'\".format( bin_spatial_to_str(self.category(), self.alias), category, ) ) query", "str, optional Method to use to compute the optimal h.", "if the vColumn is boolean, False otherwise. Returns ------- bool", "# You may not use this file except in compliance", "self.apply(func=\"{} + ({})\".format(\"{}\", x)) # ---# def add_copy(self, name: str):", "), title=\"Computing the Store Usage of the vColumn {}.\".format(self.alias), method=\"fetchfirstelem\",", "range(max_floor): self.transformations += [(\"{}\", self.ctype(), self.category())] self.transformations += [(new_column, ctype,", "assert response_cat == [0, 1], TypeError( \"vColumn {} must be", "ALL (SELECT AVG({}) FROM vdf_table WHERE {} > {})\".format( self.parent.__genSQL__(),", "max(math.floor(best_h), 1) return best_h # ---# def nunique(self, approx: bool", "and transforms the vColumn using a time series rule. Parameters", "p_alpha, p_1_alpha) ) return self.parent # ---# def dropna(self): \"\"\"", "'min' (Minimum). Returns ------- float/str minimum See Also -------- vDataFrame.aggregate", "the input vColumns from the vDataFrame. \"\"\" check_types([(\"add_history\", add_history, [bool])])", "the range plot of the vColumn. The aggregations used are", "self.parent.__genSQL__(), ) query = \"SELECT {}, MIN(ord) AS ord, SUM(1", "The describe method. auto : Sets the method to 'numerical'", "cut( self, breaks: list, labels: list = [], include_lowest: bool", "limit: int = 5, offset: int = 0): \"\"\" ---------------------------------------------------------------------------", "[ ( \"({} - {}) / {}({})\".format( \"{}\", avg, \"NULLIFZERO\"", "transformations. If the vDataFrame represents the entire relation, a vColumn", "length of parameter 'labels' + 1 or parameter 'labels' must", "return self.parent except Exception as e: raise QueryError( \"{}\\nError when", "by = [by] check_types( [ (\"method\", method, [\"zscore\", \"robust_zscore\", \"minmax\"]),", "[str]), ] ) method = method.lower() assert (method != \"cat_stats\")", "In this case, the parameter 'numcol' must be defined. numerical", "median is returned. By setting this parameter to False, the", "vDataFrame. This parameter is used for testing purpose. Returns -------", "{} WHERE {} IS NOT NULL ORDER BY {} ASC", "0: warning_message = \"The method 'robust_zscore' is available only if", "vColumn based on an aggregation. Parameters ---------- by: str, optional", "{} FROM {} LIMIT 1\".format( avg, stddev, self.parent.__genSQL__() ), print_time_sql=False,", "self, alias: str, transformations: list = [], parent=None, catalog: dict", "vColumn Density Plot. Parameters ---------- by: str, optional vColumn to", "(ex: AVG(column1) + 5). of: str, optional The vColumn to", "\"\", prefix_sep: str = \"_\", drop_first: bool = True, use_numbers_as_suffix:", "( \"'[' || FLOOR({} / {}) * {} || ';'", "------- vDataFrame self.parent See Also -------- vDataFrame.drop: Drops the input", "k in range(len(distinct_elements)): expr += [ \"'{}', {}\".format(str(distinct_elements[k]).replace(\"'\", \"''\"), k)", "vdf_table AS (SELECT * FROM {}) {}\".format( self.parent.__genSQL__(), \" UNION", "mod), 'x' represents the second argument. 
Returns ------- vDataFrame self.parent", "def isin(self, val: list, *args): \"\"\" --------------------------------------------------------------------------- Looks if some", "check_types( [ (\"alpha\", alpha, [int, float]), (\"use_threshold\", use_threshold, [bool]), (\"threshold\",", "# ---# def min(self): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using", "the Geospatial object. Parameters ---------- *args / **kwargs Any optional", "for elem in values: for i in range(len(values[elem])): if isinstance(values[elem][i],", "sauv: self.catalog[\"count\"] = int(sauv[\"count\"]) + total self.catalog[\"percent\"] = ( 100", "param = {\"color\": colors[idx % len(colors)]} ax = self.parent.search( \"{}", "vDataFrame.filter: Filters the data using the input expression. \"\"\" self.parent.filter(\"{}", "FROM {} WHERE {} IS NOT NULL GROUP BY {}", "int Start of the slicing. step: int Size of the", "between 0 and 1 that represents the quantile. For example:", "logic to the data. # # # Modules # #", "the kernel. kernel: str, optional The method used for the", "[\"name\", \"dtype\"] + index, \"value\": [self.alias, self.ctype()] + result, }", "def div(self, x: float): \"\"\" --------------------------------------------------------------------------- Divides the vColumn by", "check_types([(\"func\", func, [str]), (\"copy_name\", copy_name, [str])]) try: try: ctype =", "more information, see utilities.tablesample. See Also -------- vDataFrame.analytic : Adds", "the vDataFrame input aggregations. \"\"\" return self.aggregate([\"skewness\"]).values[self.alias][0] skew = skewness", "self.alias, self.parent.__genSQL__(), by[0], ), title=\"Computing the different categories {} to", "elements for vColumns 1 and 2 to be used as", "graphical options. # # VerticaPy aims to do all of", "categorical. The less frequent elements will be gathered together to", "to look at the final transformation. Returns ------- vDataFrame self.parent", "(median absolute deviation). Returns ------- float mad See Also --------", "(self.parent.is_colname_in(name)), NameError( f\"A vColumn has already the alias {name}.\\nBy changing", "Applies a function to the vColumn. \"\"\" check_types( [ (", "\"SELECT {}, MIN(ord) AS ord, SUM(1 - {}) AS non_events,", "elif self.isnum() and method in (\"same_width\", \"auto\"): if not (h)", "\"\"\" check_types([(\"approx\", approx, [bool])]) if approx: return self.aggregate(func=[\"approx_unique\"]).values[self.alias][0] else: return", "with the func 'x -> {}'.\".format( copy_name.replace('\"', \"\"), func.replace(\"{}\", \"x\"),", "expr = \"{}({})\".format(func.upper(), \"{}\") else: expr = \"{}({}, {})\".format(func.upper(), \"{}\",", "3rd. plot_median: bool, optional If set to True, the Median", "xmax = self.max() else: xmin, xmax = xlim custom_lines =", "= [ column for column in self.parent._VERTICAPY_VARIABLES_[\"columns\"] ] force_columns.remove(self.alias) executeSQL(", "in the partition. order_by: list, optional List of the vColumns", "xmax), ax=ax, **updated_dict(param, style_kwds, idx), ) custom_lines += [ Line2D(", "---------- new_name: str The new vColumn alias. Returns ------- vDataFrame", "index_start = 0 if index_start < 0: index_start += self.parent.shape()[0]", "optional vColumn to use to partition the data. 
h: float,", "+ 1 or parameter 'labels' must be empty.\" ) conditions,", "self.catalog[\"std\"] = 1 elif method == \"minmax\": self.catalog[\"min\"] = 0", "not (is_numeric) or (method == \"categorical\") ): query = \"\"\"(SELECT", "var(self): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'var' (Variance). Returns", "{}.\".format( self.alias, dtype ) ) return self.parent except Exception as", "alias. transformations: list, optional List of the different transformations. Each", "must be similar to the following: (function, type, category) parent:", "\"SELECT ZEROIFNULL(SUM(LENGTH({}::varchar))) FROM {}\".format( bin_spatial_to_str(self.category(), self.alias), self.parent.__genSQL__(), ), title=\"Computing the", "entire relation, a vColumn can be seen as one column", "aggregations. \"\"\" return self.aggregate([\"sum\"]).values[self.alias][0] # ---# def tail(self, limit: int", "to all the vColumns. vDataFrame.eval : Evaluates a customized expression.", "list: [\"Fouad\", \"Badr\"] Returns ------- vDataFrame The vDataFrame of the", "boolean. vDataFrame[].isdate : Returns True if the vColumn category is", "ax=None, **style_kwds, ): \"\"\" --------------------------------------------------------------------------- Draws the box plot of", "k) expr = \", \".join(expr) + \", {})\".format(len(distinct_elements)) self.transformations +=", "vColumn using a user-defined encoding. Parameters ---------- argv: object Any", "---# def plot( self, ts: str, by: str = \"\",", "( \"({} - {}) / {}({} - {})\".format( \"{}\", cmin,", "-------- vDataFrame.outliers : Computes the vDataFrame Global Outliers. \"\"\" if", "categorical : Uses only categorical aggregations during the computation. cat_stats", "alpha, [int, float]), (\"use_threshold\", use_threshold, [bool]), (\"threshold\", threshold, [int, float]),", "\"label_encode is only available for categorical variables.\" ) warnings.warn(warning_message, Warning)", "flower brackets {}. For example to apply the function: x", "optional vColumn to use to partition the data. method: str,", "Discretizes the vColumn. vDataFrame[].get_dummies : Encodes the vColumn with One-Hot", "(\"prefix\", prefix, [str]), (\"prefix_sep\", prefix_sep, [str]), (\"drop_first\", drop_first, [bool]), (\"use_numbers_as_suffix\",", "= method.lower() self.parent.are_namecols_in(by) by = self.parent.format_colnames(by) nullifzero, n = 1,", "/\\ \"-./ \\ /\\ \"-.\\ \\ # \\ \\ \\'/", "= \"auto\", expr: str = \"\", by: list = [],", "\"\"\" --------------------------------------------------------------------------- Returns True if the vColumn category is date,", "vColumn using 'mad' (median absolute deviation). Returns ------- float mad", "vColumn. The vColumn will be transformed. 
Parameters ---------- pat: str", "Warning) return self mad, med = self.aggregate([\"mad\", \"approx_median\"]).values[self.alias] mad *=", "following: CENTURY / DAY / DECADE / DOQ / DOW", ") self.parent.filter( \"({} BETWEEN {} AND {})\".format(self.alias, p_alpha, p_1_alpha) )", "ax=None, **style_kwds, ): \"\"\" --------------------------------------------------------------------------- Draws the bar chart of", "Also -------- vDataFrame[].decode : Encodes the vColumn using a user-defined", "\"STDDEV({}) OVER (PARTITION BY {})\".format( self.alias, \", \".join(by) ), )", "> 0\".format(\"{}\", pat.replace(\"'\", \"''\")) ) # ---# def str_count(self, pat:", "self.head(limit=verticapy.options[\"max_rows\"]).__repr__() # ---# def _repr_html_(self): return self.head(limit=verticapy.options[\"max_rows\"])._repr_html_() # ---# def", "An object containing the result. For more information, see utilities.tablesample.", "\"\"\" --------------------------------------------------------------------------- Normalizes the input vColumns using the input method.", "(PARTITION BY {})\".format( self.alias, \", \".join(by) ), ) else: cmax,", "See Also -------- vDataFrame[].nlargest : Returns the n largest vColumn", "WITHIN GROUP (ORDER BY {}) OVER (), PERCENTILE_CONT(1 - {})", "k if k > 0 else \"\", self.alias ), method=\"fetchall\",", "vColumn. Parameters ---------- threshold: float, optional Uses the Gaussian distribution", "the median) prod : product range : difference between the", "'{}', tree_id = {}, format = 'tabular'))\".format( tmp_model_name, i )", "{}\".format( e, self.alias, dtype ) ) # ---# def avg(self):", "final_transformation sauv = {} for elem in self.catalog: sauv[elem] =", "self.catalog[elem] = sauv[elem] elif elem == None: self.catalog[elem] = None", "self, by: str = \"\", method: str = \"density\", of:", "the vColumn 'of'. max : Maximum of the vColumn 'of'.", "or not (isinstance(val, Iterable)): val = [val] val += list(args)", "user-defined encoding. vDataFrame[].discretize : Discretizes the vColumn. vDataFrame[].label_encode : Encodes", "if val == None: warning_message = \"The vColumn {} has", "'{}')\".format(\"{}\", pat.replace(\"'\", \"''\")) ) # ---# def str_replace(self, to_replace: str,", "the 3rd. end_date: str / date, optional Input End Date.", "features. It supports the # entire data science life cycle,", "# ---# def isnum(self): \"\"\" --------------------------------------------------------------------------- Returns True if the", "(ex: top1 for the mode) topk_percent : kth most occurent", "(\"float\", \"int\"), TypeError( \"The column 'numcol' must be numerical\" )", "sauv[ \"std\" ] elif method == \"minmax\": self.catalog[elem] = (sauv[elem]", "str, by: str = \"\", start_date: Union[str, datetime.datetime, datetime.date] =", "pass total = int(total) conj = \"s were \" if", "of: str, optional The vColumn to use to compute the", ") executeSQL( \"SELECT {}, {} FROM {} LIMIT 1\".format( cmax,", "range kurtosis : kurtosis jb : Jarque-Bera index mad :", ".replace(\",\", \"_\") .replace(\"'\", \"_\") ) expr = \"DECODE({}, '{}', 1,", "the list will be included. right: bool, optional How the", "the approximate cardinality is returned. By setting this parameter to", "aggregation. \"\"\" if isinstance(pie_type, str): pie_type = pie_type.lower() check_types( [", "warning_message = ( \"label_encode is only available for categorical variables.\"", "auto : Combination of Freedman Diaconis and Sturges. 
freedman_diaconis :", "self.transformations], catalog=self.catalog, ) setattr(self.parent, name, new_vColumn) setattr(self.parent, name[1:-1], new_vColumn) self.parent._VERTICAPY_VARIABLES_[\"columns\"]", "area, step, ax=ax, **style_kwds, ) # ---# def product(self): \"\"\"", "Also -------- vDataFrame.drop: Drops the input vColumns from the vDataFrame.", "[int, float]), (\"use_threshold\", use_threshold, [bool]), (\"threshold\", threshold, [int, float]), ]", "vColumns used in the partition. order_by: list, optional List of", "), print_time_sql=False, ) self.parent._VERTICAPY_VARIABLES_[\"columns\"].remove(self.alias) delattr(self.parent, self.alias) except: self.parent._VERTICAPY_VARIABLES_[\"exclude_columns\"] += [self.alias]", "\"{}\", \", \".join(by) ) elif method in (\"ffill\", \"pad\", \"bfill\",", "else \"\", cmax, cmin, ), \"float\", \"float\", ) ] if", "is numerical , 'categorical' otherwise. categorical : Uses only categorical", "[str])]) old_name = quote_ident(self.alias) new_name = new_name.replace('\"', \"\") assert not", "AVG({}), STDDEV({}) FROM {} GROUP BY {}\".format( by[0], self.alias, self.alias,", "if nbins <= 0: h = self.numh() else: h =", "/ NULLIFZERO(pt_events))) END AS woe, CASE WHEN non_events = 0", "= executeSQL( \"SELECT {}, AVG({}), STDDEV({}) FROM {} GROUP BY", "(Number of non-Missing elements). Returns ------- int number of non-Missing", "gen_cmap()[0] else: if not (\"color\" in kwargs): from verticapy.plot import", "in the vColumn. \"\"\" check_types([(\"n\", n, [int, float])]) query =", "datetime.date]), (\"area\", area, [bool]), (\"step\", step, [bool]), ] ) self.parent.are_namecols_in(ts)", "otherwise. Returns ------- bool True if the vColumn is boolean.", "vColumn using 'max' (Maximum). Returns ------- float/str maximum See Also", "regular expression. Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].str_contains", "Drops the vColumn from the vDataFrame. Dropping a vColumn means", "[str])]) name = quote_ident(name.replace('\"', \"_\")) assert name.replace('\"', \"\"), EmptyParameter( \"The", "(avg and std). (x - avg) / std robust_zscore :", "item in query_result] index = [\"unique\", \"count\"] + [item[0] for", "# ---# def mul(self, x: float): \"\"\" --------------------------------------------------------------------------- Multiplies the", "\"asin\", \"atan\", \"cbrt\", \"ceil\", \"cos\", \"cosh\", \"cot\", \"exp\", \"floor\", \"ln\",", "[by] check_types( [ (\"method\", method, [\"zscore\", \"robust_zscore\", \"minmax\"]), (\"by\", by,", "the dependent variable. Parameters ---------- y: str Response vColumn. nbins:", "sauv[\"min\"]) / ( sauv[\"max\"] - sauv[\"min\"] ) except: pass if", "\"WITH vdf_table AS (SELECT * FROM {}) {}\".format( self.parent.__genSQL__(), \"", "0, an optimised interval will be computed. nbins: int, optional", "---------- pat: str regular expression. Returns ------- vDataFrame self.parent See", "else: trans = (\"FLOOR({}) || ''\", \"varchar\", \"text\") else: trans", "h, [int, float]), (\"response\", response, [str]), (\"nbins\", nbins, [int, float]),", "\"unique\": 2, \"approx_unique\": 2, \"prod\": 0, }, ) setattr(self.parent, name,", "for elem in self.catalog: total += sys.getsizeof(elem) return total #", "nbins will be computed. 
h: float, optional Interval width of", "[int, float]), ] ) if use_threshold: result = self.aggregate(func=[\"std\", \"avg\"]).transpose().values", "int]), (\"upper\", upper, [float, int])]) assert (lower != None) or", "elif method.lower() in (\"freedman_diaconis\", \"fd\"): best_h = fd else: best_h", "mode.\", method=\"fetchall\", ) top = None if not (result) else", "numerical value\" ) lower_when = ( \"WHEN {} < {}", "most occurent element. Parameters ---------- dropna: bool, optional If set", "in sauv: self.catalog[elem] = sauv[elem] return self.parent # ---# def", "usage. \"\"\" pre_comp = self.parent.__get_catalog_value__(self.alias, \"store_usage\") if pre_comp != \"VERTICAPY_NOT_PRECOMPUTED\":", "# entire data science life cycle, uses a ‘pipeline’ mechanism", "vColumn. If this parameter is equal to 0, an optimised", "best_h = fd else: best_h = max(sturges, fd) self.parent.__update_catalog__({\"index\": [\"numh\"],", "NULL ORDER BY {} ASC LIMIT {}\".format( self.parent.__genSQL__(), self.alias, self.alias,", "self.isnum() or self.isdate(), TypeError( \"cut only works on numerical /", "== \"float\"): trans = ( \"'[' || FLOOR({} / {})", ") else: query = \"SELECT {} FROM (SELECT {} AS", "and (self.parent[by[0]].nunique() < 50): try: result = executeSQL( \"SELECT {},", "the outliers threshold. Values lesser than quantile(alpha) or greater than", "30): \"\"\" --------------------------------------------------------------------------- Returns the k most occurent elements, how", "Also -------- vDataFrame.case_when : Creates a new feature by evaluating", "bar chart of the vColumn based on an aggregation. Parameters", "caterogies of the vColumn. See Also -------- vDataFrame.topk : Returns", "of the 'alpha' parameter. alpha: float, optional Number representing the", "== None: mean_alpha = \"NULL\" if mean_1_alpha == None: mean_alpha", "float]), (\"offset\", offset, [int, float])]) if offset < 0: offset", "a function to all the vColumns. vDataFrame.eval : Evaluates a", "vColumn with One-Hot Encoding. vDataFrame[].mean_encode : Encodes the vColumn using", "model_name = '{}', tree_id = {}, format = 'tabular'))\".format( tmp_model_name,", "\"float\", ) ] if method != \"robust_zscore\": max_floor = 0", "query=query, title=\"Getting the vColumn element.\", method=\"fetchfirstelem\", ) else: return getattr(self,", "list will be included. right: bool, optional How the intervals", "For more information, see utilities.tablesample. See Also -------- vDataFrame.iv_woe :", "---# def apply_fun(self, func: str, x: float = 2): \"\"\"", "if offset < 0: offset = max(0, self.parent.shape()[0] - limit)", "largest elements.\".format(self.alias, n) return to_tablesample(query, title=title) # ---# def normalize(", "title=title) result.values[\"index\"] += [\"total\"] result.values[\"non_events\"] += [sum(result[\"non_events\"])] result.values[\"events\"] += [sum(result[\"events\"])]", "range(max_floor): self.parent[copy_name].transformations += [ (\"{}\", self.ctype(), self.category()) ] self.parent[copy_name].transformations +=", "style_kwds, idx), ) custom_lines += [ Line2D( [0], [0], color=updated_dict(param,", "one_hot_encode = get_dummies # ---# def head(self, limit: int =", "each key corresponds to an aggregation. vColumns will memorize the", "{\"n_estimators\": 20, \"max_depth\": 10} to train a Random Forest with", "Computes the Information Value (IV) / Weight Of Evidence (WOE)", "idx), ) custom_lines += [ Line2D( [0], [0], color=updated_dict(param, style_kwds,", "in each record of the vColumn. 
vDataFrame[].extract : Extracts the", "model = RandomForestRegressor(tmp_model_name) else: model = RandomForestClassifier(tmp_model_name) model.set_params({\"n_estimators\": 20, \"max_depth\":", "Also -------- vDataFrame[].isdate : Returns True if the vColumn category", "the mean encoding of a response. \"\"\" import verticapy.stats as", "/ DOW / DOY / EPOCH / HOUR / ISODOW", "(self.parent[numcol].isbool()) else \"\" query, cat = [], self.distinct() if len(cat)", "Encoding. vDataFrame[].get_dummies : Encodes the vColumn with One-Hot Encoding. \"\"\"", "regular expression matches in each of the vColumn record by", "\"\"\" --------------------------------------------------------------------------- Returns the k most occurent elements, how often", "= -1 if (self.category() == \"int\") else \"\" if (h", "func: list List of the different aggregation. aad : average", "idx == 0 and include_lowest: op1, close_l = \"<=\", \"[\"", "vColumn is boolean. vDataFrame[].isdate : Returns True if the vColumn", "= \"(CASE \" for i in range(1, n): trans +=", "\"\"\" --------------------------------------------------------------------------- Returns the n smallest elements in the vColumn.", "gen_tmp_name(schema=schema, name=\"model\") assert nbins >= 2, ParameterError( \"Parameter 'nbins' must", "function: x -> x^2 + 2 use \"POWER({}, 2) +", "vColumn 'of'. q% : q Quantile of the vColumn 'of'", "a specified number of decimal places sign : arithmetic sign", "\"\" ) upper_when = ( \"WHEN {} > {} THEN", "{} AS {} FROM {}{} LIMIT {} OFFSET {}\".format( bin_spatial_to_str(self.category(),", "def mul(self, x: float): \"\"\" --------------------------------------------------------------------------- Multiplies the vColumn by", "(\"n\", n, [int, float])]) if n == 1: pre_comp =", "!= \"\"): numcol = self.parent.format_colnames(numcol) assert self.parent[numcol].category() in (\"float\", \"int\"),", "the missing values. auto : Mean for the numerical and", "draw an Area Plot. step: bool, optional If set to", "tail(self, limit: int = 5): \"\"\" --------------------------------------------------------------------------- Returns the tail", "vDataFrame[].date_part : Extracts a specific TS field from the vColumn.", "numerical or of type date like. Optimized h will be", "kernel=kernel, nbins=nbins, xlim=(xmin, xmax), ax=ax, **updated_dict(param, style_kwds, idx), ) custom_lines", "nbins - 1 ) result = executeSQL( query=query, title=\"Computing the", "1 otherwise. expr: str, optional SQL expression. by: list, optional", "(\"expr\", expr, [str]), (\"by\", by, [list]), (\"order_by\", order_by, [list]), ]", "the ceiling. Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].date_part", "by, [str]), (\"method\", method, [str]), (\"of\", of, [str]), (\"max_cardinality\", max_cardinality,", "vDataFrame input aggregations. \"\"\" return self.aggregate([\"kurtosis\"]).values[self.alias][0] kurt = kurtosis #", ") return store_usage # ---# def str_contains(self, pat: str): \"\"\"", "-------- vDataFrame[].hist : Draws the histogram of the vColumn based", "evaluate the approximate density function. Increasing this parameter will increase", "if self.category() == \"int\": h = int(max(math.floor(h), 1)) floor_end =", "Any amount of expressions. 
The expression generated will look like:", "def store_usage(self): \"\"\" --------------------------------------------------------------------------- Returns the vColumn expected store usage", "---# def drop(self, add_history: bool = True): \"\"\" --------------------------------------------------------------------------- Drops", "# You may obtain a copy of the License at", "else: expr = \"{}({}, {})\".format(func.upper(), \"{}\", x) return self.apply(func=expr) #", "input 'quantile'. Parameters ---------- x: float A float between 0", "conj = \"s were \" if len(all_new_features) > 1 else", "= 0, pie_type: str = \"auto\", ax=None, **style_kwds, ): \"\"\"", "= [elem for elem in copy_trans] for elem in sauv:", "box plot. The other categories will be filtered. ax: Matplotlib", "y, query, ) query = \"SELECT {}, ord, non_events, events,", "check_types([(\"response\", response, [str])]) self.parent.are_namecols_in(response) response = self.parent.format_colnames(response) assert self.parent[response].isnum(), TypeError(", "power or mod), 'x' represents the second argument. Returns -------", "LIMIT {}\".format( self.parent.__genSQL__(), self.alias, self.alias, n ) title = \"Reads", "in result], } return tablesample(values) # ---# def value_counts(self, k:", "Deviation). Returns ------- float std See Also -------- vDataFrame.aggregate :", "the data. h: float, optional Interval width if the vColumn", "{}\".format(elem[0], elem[1]) for elem in result] ), ) executeSQL( \"SELECT", "else: return self.aggregate(func=[\"unique\"]).values[self.alias][0] # ---# def pie( self, method: str", "Parameters ---------- x: float A float between 0 and 1", "elif method == \"robust_zscore\": if n > 0: warning_message =", "ts: str, by: str = \"\", start_date: Union[str, datetime.datetime, datetime.date]", "2, \"prod\": 0, }, ) setattr(self.parent, name, new_vColumn) setattr(self.parent, name.replace('\"',", "= (\"FLOOR({}) || ''\", \"varchar\", \"text\") else: trans = (\"{}", "utilities.tablesample. See Also -------- vDataFrame.iv_woe : Computes the Information Value", "vColumn is numerical. \"\"\" return self.ctype().lower() in (\"bool\", \"boolean\") #", "---# def decode(self, *argv): \"\"\" --------------------------------------------------------------------------- Encodes the vColumn using", "result. For more information, see utilities.tablesample. See Also -------- vDataFrame[].tail", "the following mapping:{}\".format( self.alias, text_info ) ) return self.parent #", "if isinstance(val, str) or not (isinstance(val, Iterable)): val = [val]", "transformation. Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].decode :", "self.head(limit=verticapy.options[\"max_rows\"])._repr_html_() # ---# def __setattr__(self, attr, val): self.__dict__[attr] = val", "WHERE split_value IS NOT NULL GROUP BY 1 ORDER BY", "5). ax: Matplotlib axes object, optional The axes to plot", "parameter 'new_name', you'll be able to solve this issue.\" )", "include_lowest: bool, optional If set to True, the lowest element", "/ # \\ / # \\/ # _ # \\", "science projects on data stored in Vertica, taking advantage Vertica’s", "cast, ) tmp_query += ( \" WHERE {} IS NULL\".format(self.alias)", "greater than quantile(1-alpha) will be filled. Returns ------- vDataFrame self.parent", "the vColumn records by an input value. 
vDataFrame[].str_slice : Slices", "assert not (self.parent.is_colname_in(new_name)), NameError( f\"A vColumn has already the alias", "See Also -------- vDataFrame[].hist : Draws the histogram of the", "= 0 self.catalog[\"std\"] = 1 elif method == \"minmax\": self.catalog[\"min\"]", "of the Random Forest model parameters used to compute the", "\\ # \\ \\__| \\ \\_____\\ \\ \\_____\\ \\ \\_____\\", "{}) * {} + {}{}) || ']'\".format( \"{}\", h, h,", "aggregations during the computation. max_cardinality: int, optional Cardinality threshold to", "vdf_table GROUP BY {0} ORDER BY COUNT(*) DESC LIMIT {1})\"\"\".format(", "VerticaPy Modules import verticapy from verticapy.utilities import * from verticapy.toolbox", "\"The response column must be numerical to use a mean", "fun == \"AVG\": val = self.avg() elif fun == \"MEDIAN\":", "nbins, [int, float]), ] ) if of: self.parent.are_namecols_in(of) of =", "COUNT(*)\" \" DESC OFFSET {1}) VERTICAPY_SUBTABLE) ORDER BY count DESC\"", "in all_partition: if len(self.parent[elem].transformations) > max_floor: max_floor = len(self.parent[elem].transformations) max_floor", "self.numh() else: h = (self.max() - self.min()) * 1.01 /", "OVER () FROM {} LIMIT 1\".format( alpha, self.alias, alpha, self.alias,", "-------- vDataFrame[].drop_outliers : Drops outliers in the vColumn. vDataFrame.outliers :", "the vColumn using 'median'. Parameters ---------- approx: bool, optional If", "[ ( \"({} - {}) / ({})\".format(\"{}\", med, mad), \"float\",", "their respective average. null : Replaces the outliers by the", "if isinstance(index_stop, int): if index_stop < 0: index_stop += self.parent.shape()[0]", "100 self.parent.__add_to_history__( \"[Label Encoding]: Label Encoding was applied to the", "mad != 0: if return_trans: return \"({} - {}) /", "BY 1 ORDER BY 2 DESC LIMIT {}) VERTICAPY_SUBTABLE ORDER", "categorical (No h will be picked or computed) ax: Matplotlib", "- self.min()) * 1.01 / nbins if h > 0.01:", "return tablesample(values) # ---# def discretize( self, method: str =", "in the vColumn. Parameters ---------- threshold: float, optional Uses the", "fun == \"MEDIAN\": val = self.median() new_column = \"COALESCE({}, {})\".format(\"{}\",", "For example, it can be 'minute' 'hour'... start: bool, optional", "_ # \\ / _ __|_. _ _ |_) #", "gen_colors()[0] if not (\"legend\" in kwargs): kwargs[\"legend\"] = True if", "BY {}) OVER (), PERCENTILE_CONT(1 - {}) WITHIN GROUP (ORDER", ") result = [elem[0] for elem in result] except: drop(tmp_view_name,", "- {}) AS non_events, SUM({}) AS events FROM ({}) x", "return self.apply( func=\"REGEXP_REPLACE({}, '{}', '{}')\".format( \"{}\", to_replace.replace(\"'\", \"''\"), value.replace(\"'\", \"''\")", "vDataFrame.aggregate : Computes the vDataFrame input aggregations. \"\"\" return self.aggregate(func=[\"prod\"]).values[self.alias][0]", "under the Apache License, Version 2.0 (the \"License\"); # You", "self.aggregate(func=[\"approx_unique\"]).values[self.alias][0] else: return self.aggregate(func=[\"unique\"]).values[self.alias][0] # ---# def pie( self, method:", "Aggregates the vColumn using 'sum'. 
Returns ------- float sum See", "else: warning_message = \"Can not normalize {} using a Robust", "See Also -------- vDataFrame.expected_store_usage : Returns the vDataFrame expected store", "\"[Fillna]: {} {} missing value{} filled.\".format( total, self.alias, conj, )", "{})\".format( \"{}\", cmin, \"NULLIFZERO\" if (nullifzero) else \"\", cmax, cmin,", "self.alias, self.alias, self.alias, ) else: query = \"SELECT {} FROM", "== 0 and include_lowest: op1, close_l = \"<=\", \"[\" elif", "self.alias: [top]} ) return top # ---# def mul(self, x:", "= \"WHERE _verticapy_row_nb_ IN ({})\".format( \", \".join([\"1\"] + nth_elems +", "int = 6, numcol: str = \"\" ): \"\"\" ---------------------------------------------------------------------------", "by: str, optional vColumn to use to partition the TS.", "] elif method == \"minmax\": self.catalog[elem] = (sauv[elem] - sauv[\"min\"])", "kth most occurent element density unique : cardinality (count distinct)", "label_encode(self): \"\"\" --------------------------------------------------------------------------- Encodes the vColumn using a bijection from", "int vColumn cardinality (or approximate cardinality). See Also -------- vDataFrame.aggregate", "kwargs): kwargs[\"figsize\"] = (14, 10) return self.parent[columns].to_geopandas(self.alias).plot(*args, **kwargs) # ---#", "= self.catalog self.parent.__add_to_history__( \"[Apply]: The vColumn '{}' was transformed with", "-> {}'.\".format( self.alias.replace('\"', \"\"), func.replace(\"{}\", \"x\"), ) ) return self.parent", "# ---# def spider( self, by: str = \"\", method:", "expressions. The expression generated will look like: even: CASE ...", "trigonometric inverse cosine asin : trigonometric inverse sine atan :", "vDataFrame.aggregate : Computes the vDataFrame input aggregations. \"\"\" return self.aggregate([\"count\"]).values[self.alias][0]", "to use when the parameter method is set to 'cat_stats'.", "query, nth_elems = nb, [], [] while total < int(float(count", "to_tablesample(query, title=title).values elif ( ((distinct_count < max_cardinality + 1) and", ": Evaluates a customized expression. \"\"\" check_types([(\"name\", name, [str])]) name", "2, ParameterError( \"Parameter 'k' must be greater or equals to", "[\"top{}\".format(n)], self.alias: [top]} ) return top # ---# def mul(self,", "AS apply_test_feature FROM {} WHERE {} IS NOT NULL LIMIT", "\"_\") ) ) name = ( name.replace(\" \", \"_\") .replace(\"/\",", ": Computes the vDataFrame Global Outliers. \"\"\" if isinstance(by, str):", "encoding. Parameters ---------- argv: object Any amount of expressions. The", "str = \"winsorize\", threshold: float = 4.0, use_threshold: bool =", "= [], include_lowest: bool = True, right: bool = True,", "\"dtype\"] + index, \"value\": [self.alias, self.ctype()] + result, } if", "n largest vColumn elements. Parameters ---------- n: int, optional Offset.", "Returns ------- float product See Also -------- vDataFrame.aggregate : Computes", "= pie_type.lower() check_types( [ (\"method\", method, [str]), (\"of\", of, [str]),", "return self.head(limit=verticapy.options[\"max_rows\"]).__repr__() # ---# def _repr_html_(self): return self.head(limit=verticapy.options[\"max_rows\"])._repr_html_() # ---#", "\"count\" in sauv: self.catalog[\"count\"] = sauv[\"count\"] self.catalog[\"percent\"] = ( 100", "The other categories will be filtered. ax: Matplotlib axes object,", "ax=ax, **style_kwds ) model.drop() return result except: model.drop() raise #", "vColumn from the vDataFrame. 
Dropping a vColumn means simply not", "warnings.warn(warning_message, Warning) return self elif n == 1: try: result", "range plot of the vColumn. The aggregations used are the", "Also -------- vDataFrame[].slice : Slices the vColumn using a time", "* {} || ';' || (FLOOR({} / {}) * {}", "input value. The vColumn will be transformed. Parameters ---------- to_replace:", "> {})\".format( self.parent.__genSQL__(), self.alias, self.alias, p_alpha, self.alias, self.alias, p_1_alpha, )", "optional Maximum number of the vColumn distinct elements to be", "{}, {})\".format(x, \"{}\")) else: return self.apply(func=\"{} + ({})\".format(\"{}\", x)) #", "RFmodel_params, [dict]), (\"return_enum_trans\", return_enum_trans, [bool]), (\"h\", h, [int, float]), (\"response\",", "# ---# def iv_woe(self, y: str, nbins: int = 10):", "catalog={ \"min\": 0, \"max\": 1, \"count\": self.parent.shape()[0], \"percent\": 100.0, \"unique\":", ") count, vColumn_min, vColumn_025, vColumn_075, vColumn_max = ( result[0], result[3],", "vColumn distinct elements to be used as categorical (No h", "value up to the next whole number cos : trigonometric", "else \"\" if (h > 1) or (self.category() == \"float\"):", "approx: return self.aggregate(func=[\"approx_unique\"]).values[self.alias][0] else: return self.aggregate(func=[\"unique\"]).values[self.alias][0] # ---# def pie(", "field from the vColumn. \"\"\" check_types( [ (\"length\", length, [int,", "func: str, copy_name: str = \"\"): \"\"\" --------------------------------------------------------------------------- Applies a", "LIMIT {1})\"\"\".format( self.alias, max_cardinality ) if distinct_count > max_cardinality: query", "vColumn alias. catalog, dict : Catalog of pre-computed aggregations. parent,", "library with scikit-like functionality for conducting # data science projects", "\"\"\" --------------------------------------------------------------------------- Multiplies the vColumn by the input element. Parameters", "= sauv[elem] return self.parent # ---# def geo_plot(self, *args, **kwargs):", "self.aggregate([\"skewness\"]).values[self.alias][0] skew = skewness # ---# def slice(self, length: int,", "the aggregation. max_cardinality: int, optional Maximum number of the vColumn", ") # ---# def str_extract(self, pat: str): \"\"\" --------------------------------------------------------------------------- Extracts", "return total # ---# def min(self): \"\"\" --------------------------------------------------------------------------- Aggregates the", "pass to the geopandas plot function. For more information, see:", "if \"top\" in elem: if \"percent\" in elem: self.catalog[elem] =", "method = method.lower() self.parent.are_namecols_in(by) by = self.parent.format_colnames(by) nullifzero, n =", "[\"auto\", \"donut\", \"rose\"]), ] ) donut = True if pie_type", "the first dummy to avoid the creation of correlated features.", "descriptive statistics. \"\"\" return self.describe(method=\"categorical\", max_cardinality=k) # ---# def var(self):", "Encode]: The vColumn {} was transformed using a mean encoding", "OVER (), PERCENTILE_CONT(1 - {}) WITHIN GROUP (ORDER BY {})", "[\"ffill\", \"pad\", \"bfill\", \"backfill\"]: all_partition += [elem for elem in", "vColumn. \"\"\" return self.iloc(limit=limit) # ---# def hist( self, method:", "optional The describe method. 
auto : Sets the method to", "try: try: ctype = get_data_types( \"SELECT {} AS apply_test_feature FROM", "'{}')\".format( \"{}\", to_replace.replace(\"'\", \"''\"), value.replace(\"'\", \"''\") ) ) # ---#", "partition_by = ( \"PARTITION BY {}\".format( \", \".join([quote_ident(column) for column", "optional The method used for the plot. gaussian : Gaussian", "vDataFrame self.parent See Also -------- vDataFrame.filter: Filters the data using", "must be date like (date, datetime, timestamp...) or numerical. by:", "TS field from the vColumn. \"\"\" check_types( [ (\"length\", length,", "to compute other vColumns. Parameters ---------- add_history: bool, optional If", "least one element to use to order the data\" )", "vDataFrameSQL(query) elif isinstance(index, int): cast = \"::float\" if self.category() ==", "# \\ / / / # \\ / / /", "an aggregation. vColumns will memorize the already computed aggregations to", "input aggregations. \"\"\" return self.aggregate([\"avg\"]).values[self.alias][0] mean = avg # ---#", "---------- method: str, optional Method to use to normalize. zscore", "lower) if (isinstance(lower, (float, int))) else \"\" ) upper_when =", "the distribution. mean : Average of the vColumn 'of'. min", "more information, see utilities.tablesample. See Also -------- vDataFrame[].nlargest : Returns", "a part of the vColumn (delimited by an offset and", "(SELECT AVG({}) FROM vdf_table WHERE {} < {}) UNION ALL", "# limitations under the License. # # |_ |~) _", "vDataFrame The vDataFrame of the search. See Also -------- vDataFrame.isin", "slicing having steps different than 1.\" ) index_stop = index.stop", "\\ \\-./\\ \\ \\ \\ \\-. \\ # \\ \\__|", "10): \"\"\" --------------------------------------------------------------------------- Computes the Information Value (IV) / Weight", "on a response column to find the most relevant interval", "# ---# def max(self): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using", "Computes the vDataFrame input aggregations. \"\"\" check_types([(\"approx\", approx, [bool])]) if", "try: sauv = {} for elem in self.catalog: sauv[elem] =", "must be greater or equal to 1\") where = \"", "Warning : SQL code generation will be slower if the", "LIMIT {}) VERTICAPY_SUBTABLE ORDER BY split_value::float\".format( \" UNION ALL \".join(query),", "for k in range(len(distinct_elements) - n): name = ( '\"{}{}\"'.format(prefix,", "\\ \\ \\/\\ \\ \\ \\ \\____ \\ \\ \\_\\", "(\"max_cardinality\", max_cardinality, [int, float]), (\"numcol\", numcol, [str]), ] ) method", "is date. See Also -------- vDataFrame[].isbool : Returns True if", "(IV) Table. \"\"\" check_types([(\"y\", y, [str]), (\"nbins\", nbins, [int])]) self.parent.are_namecols_in(y)", "vColumn. \"\"\" check_types([(\"n\", n, [int, float])]) query = \"SELECT *", "* sauv[\"count\"] / self.parent.shape()[0] ) except: pass self.parent.__add_to_history__( \"[Discretize]: The", "pass max_floor -= len(self.transformations) if copy_name: self.add_copy(name=copy_name) for k in", "self.category())] self.transformations += final_transformation sauv = {} for elem in", "\"{}\", \", \".join(by) ) else: new_column = \"COALESCE({}, {}({}) OVER", "== \"mean\": query = \"WITH vdf_table AS (SELECT * FROM", "i ) for i in range(parameters[\"n_estimators\"]) ] query = \"SELECT", "if (method in (\"ffill\", \"pad\")) else \" DESC\" partition_by =", "start_or_end = \"START\" if (start) else \"END\" return self.apply( func=\"TIME_SLICE({},", "int vColumn expected store usage. 
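The fragment opening the line above belongs to vColumn.describe (methods auto, categorical, cat_stats, numerical; parameters max_cardinality and numcol). A hedged sketch, reusing the hypothetical vdf and columns from the normalize example:

vdf["age"].describe(method="numerical")    # count, mean, std, quantiles...
vdf["embarked"].describe(method="categorical", max_cardinality=6)
# 'cat_stats' requires 'numcol', a numerical column whose statistics are
# computed inside each category of the vColumn:
vdf["embarked"].describe(method="cat_stats", numcol="fare")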
See Also -------- vDataFrame.expected_store_usage :", "to the vColumn. Parameters ---------- func: str Function to use", "partitioned by the different vColumn categories. Parameters ---------- response: str", "{} = '{}'\".format( bin_spatial_to_str(self.category(), self.alias), category, ) ) query +=", "(\"prefix_sep\", prefix_sep, [str]), (\"drop_first\", drop_first, [bool]), (\"use_numbers_as_suffix\", use_numbers_as_suffix, [bool]), ]", "= \"DECODE({}, NULL, 0, 1)\" elif method in (\"mean\", \"avg\",", "is lesser than November 1993 the 3rd. end_date: str /", ": Computes bins with the same number of elements. same_width", "The method to use to aggregate the data. count :", "vColumn descriptive statistics. \"\"\" check_types([(\"k\", k, [int, float]), (\"dropna\", dropna,", "optional If set to True, the record will be sliced", "**style_kwds Any optional parameter to pass to the Matplotlib functions.", "By setting this parameter to False, the function's performance can", "where the vColumn is missing. Returns ------- vDataFrame self.parent See", "index) # ---# def __len__(self): return int(self.count()) # ---# def", "# ---# def kurtosis(self): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using", "and their distributions as percents. Parameters ---------- k: int, optional", "int = 200, xlim: tuple = None, ax=None, **style_kwds, ):", "= self.topk(k).values[\"index\"] trans = ( \"(CASE WHEN {} IN ({})", "= \"_\", drop_first: bool = True, use_numbers_as_suffix: bool = False,", "than November 1993 the 3rd. end_date: str / date, optional", "most occurent element. See Also -------- vDataFrame.aggregate : Computes the", "self.parent.__update_catalog__({\"index\": index, self.alias: result}) for elem in values: for i", "[elem for elem in self.transformations] total = self.count() if method", "!= \"VERTICAPY_NOT_PRECOMPUTED\": return pre_comp assert self.isnum() or self.isdate(), ParameterError( \"numh", "/ / / # \\_______/ / / # ______ /", "\"WHEN {} < {} THEN {} \".format(\"{}\", lower, lower) if", "self.alias, self.alias, self.parent.__genSQL__(), by[0], ), title=\"Computing the different categories to", "for item in sublist] # ---# def div(self, x: float):", "self.alias, y, y, self.parent.__genSQL__(), ) query = \"SELECT {}, MIN(ord)", "of elements to display. Returns ------- tablesample An object containing", "new_name.replace('\"', \"\") assert not (self.parent.is_colname_in(new_name)), NameError( f\"A vColumn has already", "will be picked or computed) ax: Matplotlib axes object, optional", "Parent of the vColumn. transformations, str : List of the", "box plot of the vColumn. Parameters ---------- by: str, optional", "ax Matplotlib axes object See Also -------- vDataFrame.plot : Draws", "the regular expression matches in each of the vColumn record", "0.75) AS 'approx_75%', APPROXIMATE_PERCENTILE ({3}{4} USING PARAMETERS percentile = 0.9)", "ORDER BY ord\".format( self.alias, query, ) title = \"Computing WOE", "Aggregates the vColumn using 'mad' (median absolute deviation). Returns -------", "= [\"unique\", \"count\"] + [item[0] for item in query_result] else:", "0, max_cardinality: int = 8, cat_priority: list = [], ax=None,", ") return [item for sublist in query_result for item in", "len(breaks) == len(labels) + 1 or not (labels), ParameterError( \"Length", "{}) / {}({} - {})\".format( \"{}\", cmin, \"NULLIFZERO\" if (nullifzero)", "Rose chart. 
It can also be a customized aggregation (ex:", "= {} for elem in self.catalog: sauv[elem] = self.catalog[elem] self.parent.__update_catalog__(erase=True,", "method == \"null\": self.apply( func=\"(CASE WHEN ({} BETWEEN {} AND", "Parameters ---------- x: float Input number. Returns ------- vDataFrame self.parent", "drop_first: bool = True, use_numbers_as_suffix: bool = False, ): \"\"\"", "the vColumn is missing. Returns ------- vDataFrame self.parent See Also", "n == 1: pre_comp = self.parent.__get_catalog_value__(self.alias, \"top\") if pre_comp !=", "vColumn records. The vColumn will be transformed. Parameters ---------- pat:", "int = 5): \"\"\" --------------------------------------------------------------------------- Returns the tail of the", "self.alias, dtype ) ) return self.parent except Exception as e:", "the vColumn record by an input value. The vColumn will", "upper) if (isinstance(upper, (float, int))) else \"\" ) func =", "# \\ / _ __|_. _ _ |_) # \\/", "------- vDataFrame self.parent See Also -------- vDataFrame[].date_part : Extracts a", "|| ']'\".format( \"{}\", h, h, \"{}\", h, h, h, floor_end", "# ---# def memory_usage(self): \"\"\" --------------------------------------------------------------------------- Returns the vColumn memory", "---# def store_usage(self): \"\"\" --------------------------------------------------------------------------- Returns the vColumn expected store", "mapping:{}\".format( self.alias, text_info ) ) return self.parent # ---# def", "ts, by, start_date, end_date, area, step, ax=ax, **style_kwds, ) #", "See the License for the specific language governing permissions and", "Returns ------- float sum See Also -------- vDataFrame.aggregate : Computes", "was converted to {}.\".format( self.alias, dtype ) ) return self.parent", "\"\"\" check_types( [ (\"ts\", ts, [str]), (\"by\", by, [str]), (\"start_date\",", "the vColumn with One-Hot Encoding. \"\"\" check_types([(\"response\", response, [str])]) self.parent.are_namecols_in(response)", "optional Offset. Returns ------- tablesample An object containing the result.", "Any optional parameter to pass to the geopandas plot function.", "to in writing, software # distributed under the License is", "NULL values will not be considered during the computation. Returns", "distinct_count > max_cardinality: query += ( \"UNION ALL (SELECT 'Others',", "each of the vColumn records. vDataFrame[].extract : Extracts the regular", ") ] self.parent.__add_to_history__( \"[AsType]: The vColumn {} was converted to", "the lowest element of the list will be included. right:", "max_cardinality: int/tuple, optional Maximum number of distinct elements for vColumns", "name, parent=self.parent, transformations=[item for item in self.transformations], catalog=self.catalog, ) setattr(self.parent,", "apply_test_feature FROM {} WHERE {} IS NOT NULL LIMIT 0\".format(", "len(self.parent[response].transformations) - len( self.transformations ) for k in range(max_floor): self.transformations", "an independent variable in relation to the dependent variable. Parameters", "Returns ------- vDataFrame self.parent See Also -------- vDataFrame.case_when : Creates", "-1 if (self.category() == \"int\") else \"\" if (h >", "maximum depth of 10. response: str, optional Response vColumn when", "( sys.getsizeof(self) + sys.getsizeof(self.alias) + sys.getsizeof(self.transformations) + sys.getsizeof(self.catalog) ) for", "the vColumn. 
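Among the fragments in the line above is vColumn.apply's type probe (the apply_test_feature query); elsewhere in this dump its docstring example maps x -> x^2 + 2 to "POWER({}, 2) + 2", with {} standing for the column. A sketch with placeholder column names:

vdf["age"].apply(func="POWER({}, 2) + 2")
# 'copy_name' writes the transformed values to a new column instead of
# overwriting the original (name below is hypothetical):
vdf["fare"].apply(func="LN({} + 1)", copy_name="fare_log")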
If this parameter is equal to 0, an", "{}\".format(limit) else: limit = \"\" query = \"(SELECT {} FROM", "of the vColumn based on an aggregation. Parameters ---------- method:", "List of the different categories to consider when drawing the", "self.parent[columns].to_geopandas(self.alias).plot(*args, **kwargs) # ---# def get_dummies( self, prefix: str =", "END)\" trans = (trans, \"varchar\", \"text\") if return_enum_trans: return trans", "(\"method\", method, [str]), (\"of\", of, [str]), (\"max_cardinality\", max_cardinality, [int, float]),", "can not be empty in case of discretization using the", "FROM {} LIMIT 10\".format( self.parent.__genSQL__(force_columns=force_columns) ), print_time_sql=False, ) self.parent._VERTICAPY_VARIABLES_[\"columns\"].remove(self.alias) delattr(self.parent,", "value. The vColumn will be transformed. Parameters ---------- to_replace: str", "/ NULLIFZERO(SUM(events) OVER ()) AS pt_events FROM ({}) x\".format( self.alias,", "str, optional Slice size unit. For example, it can be", "'of'. sum : Sum of the vColumn 'of'. q% :", "**updated_dict(param, style_kwds, idx), ) custom_lines += [ Line2D( [0], [0],", "(min and max). (x - min) / (max - min)", "the parameter method is set to 'cat_stats'. Returns ------- tablesample", "values higher than the upper bound to the upper bound", "the vColumn using 'product'. Returns ------- float product See Also", "grouping by elements, please use a method in zscore|minmax\" warnings.warn(warning_message,", "different categories to [0, n - 1] (n being the", "the nth most occurent element. Parameters ---------- dropna: bool, optional", "RFmodel_params: dict = {}, response: str = \"\", return_enum_trans: bool", "(count distinct) var : variance Other aggregations could work if", "vColumn will be transformed. Parameters ---------- pat: str regular expression.", "FROM {}\".format( self.alias, self.alias, self.alias, self.alias, self.alias, table ) result", "compliance with the License. # You may obtain a copy", "cot : trigonometric cotangent exp : exponential function floor :", "\"regr_avgy\": {}, \"regr_count\": {}, \"regr_intercept\": {}, \"regr_r2\": {}, \"regr_slope\": {},", "vDataFrame self.parent See Also -------- vDataFrame[].slice : Slices the vColumn", "- The MAD is null !\".format( self.alias ) warnings.warn(warning_message, Warning)", "0 OR events = 0 THEN 0 ELSE (pt_non_events -", ": Returns True if the vColumn category is date. vDataFrame[].isnum", "usage (byte) See Also -------- vDataFrame.memory_usage : Returns the vDataFrame", "of the parent vDataFrame. This parameter is used for testing", "---------- dropna: bool, optional If set to True, NULL values", "[str]), (\"of\", of, [str]), (\"max_cardinality\", max_cardinality, [int, float]), (\"h\", h,", "= \"\", prefix_sep: str = \"_\", drop_first: bool = True,", "built-in analytics and machine learning features. It supports the #", "If set to True, the Median will be drawn. ax:", "AS ord, SUM(1 - {}) AS non_events, SUM({}) AS events", "the slicing. Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].str_contains", "self.parent.format_colnames(column) columns += [column] if not (\"cmap\" in kwargs): from", ") self.parent._VERTICAPY_VARIABLES_[\"columns\"].remove(self.alias) delattr(self.parent, self.alias) except: self.parent._VERTICAPY_VARIABLES_[\"exclude_columns\"] += [self.alias] if add_history:", "the vColumn. 
transformations, str : List of the different transformations.", "\"\"\" --------------------------------------------------------------------------- Draws the Time Series of the vColumn. Parameters", ": most occurent element percent : percent of non-missing elements", "'numcol' must be numerical\" ) cast = \"::int\" if (self.parent[numcol].isbool())", "varchar. same_freq : Computes bins with the same number of", "= gen_tmp_name(schema=schema, name=\"view\") tmp_model_name = gen_tmp_name(schema=schema, name=\"model\") assert nbins >=", "vColumn based on an aggregation. Parameters ---------- method: str, optional", "the vColumn most occurent elements. \"\"\" if \"agg\" not in", "type must be date like (date, datetime, timestamp...) or numerical.", "vDataFrame[].apply : Applies a function to the input vColumn. \"\"\"", "assert not (self.parent.is_colname_in(name)), NameError( f\"A vColumn has already the alias", "== 0: warning_message = \"Can not normalize {} using the", "for the categorical vColumns. bfill : Back Propagation of the", "[bool]))]) prefix = \"approx_\" if approx else \"\" return self.aggregate(func=[prefix", "by 0 is forbidden !\") return self.apply(func=\"{} / ({})\".format(\"{}\", x))", "of discretization using the method 'topk'\" ) distinct = self.topk(k).values[\"index\"]", "use_numbers_as_suffix: bool = False, ): \"\"\" --------------------------------------------------------------------------- Encodes the vColumn", "cmax, cmin, self.parent.__genSQL__() ), print_time_sql=False, ) except: cmax, cmin =", "avg = \"DECODE({}, {}, NULL)\".format( by[0], \", \".join( [ \"{},", "'ts' is greater than November 1993 the 3rd. plot_median: bool,", "the method is 'cat_stats'\" ) distinct_count, is_numeric, is_date = (", "{}) WITHIN GROUP (ORDER BY {}) OVER () FROM {}", "val} return self.parent.isin(val) # ---# def isnum(self): \"\"\" --------------------------------------------------------------------------- Returns", "# ____________ ______ # / __ `\\ / / #", "executeSQL( query=query, title=\"Computing the descriptive statistics of {}.\".format(self.alias), method=\"fetchall\", )", "'{}'.\".format(old_name, new_name) ) return parent # ---# def round(self, n:", "def label_encode(self): \"\"\" --------------------------------------------------------------------------- Encodes the vColumn using a bijection", "== \"smart\": schema = verticapy.options[\"temp_schema\"] if not (schema): schema =", "each of the vColumn records by an input value. \"\"\"", "= offset tail.dtype[self.alias] = self.ctype() tail.name = self.alias return tail", "1e-99) if method.lower() == \"sturges\": best_h = sturges elif method.lower()", "assert n >= 1, ParameterError(\"Parameter 'n' must be greater or", ").format(self.alias, max_cardinality + 1) query = \"WITH vdf_table AS (SELECT", "method. auto : Sets the method to 'numerical' if the", "!= None else \"NULL\", elem[1] if elem[1] != None else", "\"s were \" if total > 1 else \" was", "AS 'approx_10%', APPROXIMATE_PERCENTILE ({3}{4} USING PARAMETERS percentile = 0.25) AS", "), title=title, ) tail.count = self.parent.shape()[0] tail.offset = offset tail.dtype[self.alias]", "1, len(by) if self.isbool(): warning_message = \"Normalize doesn't work on", "END AS woe, CASE WHEN non_events = 0 OR events", "input number of digits after the comma. Parameters ---------- n:", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "impute the missing values. 
auto : Mean for the numerical", "result], \"count\": [int(item[1]) for item in result], \"percent\": [float(round(item[2], 3))", "max(self): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'max' (Maximum). Returns", "breaks[idx], breaks[idx + 1] if right: op1, op2, close_l, close_r", "Plot of the input vColumns. \"\"\" if isinstance(cat_priority, str) or", "using the method 'smart'.\" ) self.parent.are_namecols_in(response) response = self.parent.format_colnames(response) drop(tmp_view_name,", "--------------------------------------------------------------------------- Returns the distinct categories of the vColumn. Returns -------", "\\ /\\ \"-./ \\ /\\ \"-.\\ \\ # \\ \\", "for item in executeSQL( query=query, title=\"Computing the average of the", "vDataFrame[].isdate : Returns True if the vColumn category is date.", "n) return to_tablesample(query, title=title) # ---# def normalize( self, method:", "), ) cmax = \"DECODE({}, {}, NULL)\".format( by[0], \", \".join(", "END)\".format( \"{}\", p_alpha, p_1_alpha, \"{}\" ) ) elif method ==", "example, it can be 'minute' 'hour'... start: bool, optional If", "donut : Donut chart. rose : Rose chart. It can", ": Normalization using the Robust Z-Score (median and mad). (x", "Encoding. vDataFrame[].label_encode : Encodes the vColumn with Label Encoding. vDataFrame[].mean_encode", "relation to the dependent variable. Parameters ---------- y: str Response", "discretization using the method 'smart'.\" ) self.parent.are_namecols_in(response) response = self.parent.format_colnames(response)", "func.replace(\"{}\", \"x\"), ) ) return self.parent except Exception as e:", "str : List of the different transformations. \"\"\" # #", "\"apply_test_feature\", ) except: ctype = get_data_types( \"SELECT {} AS apply_test_feature", "(category in (\"None\", None)) else \" WHERE {} = '{}'\".format(", "= std # ---# def store_usage(self): \"\"\" --------------------------------------------------------------------------- Returns the", "= 6, nbins: int = 0, h: float = 0,", "numcol, cast, ) tmp_query += ( \" WHERE {} IS", "vDataFrame \"heavier\" if it is used to compute other vColumns.", "vColumn. The function variable must be composed of two flower", "from the vDataFrame.\".format(self.alias) ) return parent # ---# def drop_outliers(", "\"store_usage\") if pre_comp != \"VERTICAPY_NOT_PRECOMPUTED\": return pre_comp store_usage = executeSQL(", "p_alpha, self.alias, self.alias, p_1_alpha, ) mean_alpha, mean_1_alpha = [ item[0]", "\" DESC OFFSET {1}) VERTICAPY_SUBTABLE) ORDER BY count DESC\" ).format(self.alias,", "1], [1, 0]) or self.isbool(): all_new_features = [] prefix =", "[str])]) try: try: ctype = get_data_types( \"SELECT {} AS apply_test_feature", "pass to the Matplotlib functions. Returns ------- ax Matplotlib axes", "method: str, optional Method to use to fill the vColumn", "+ desc for column in order_by]) new_column = \"COALESCE({}, LAST_VALUE({}", "vColumn \" \"or simply because of ambiguous columns naming.\\nBy changing", "element. Parameters ---------- x: float Input number. Returns ------- vDataFrame", "True): \"\"\" --------------------------------------------------------------------------- Drops the vColumn from the vDataFrame. Dropping", "the input element. Parameters ---------- x: float Input number. 
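The line above opens with vColumn.fillna's method list ('auto' imputes numerical columns with the mean and categorical ones with the mode; ffill/bfill propagate values along an ordering). A sketch under the same hypothetical table:

vdf["age"].fillna(method="auto")                    # mean (numerical column)
vdf["fare"].fillna(method="median", by=["pclass"])  # per-partition median
# Forward fill requires 'order_by', per the assertion in the fragments:
vdf["boat"].fillna(method="ffill", order_by=["name"])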
Returns", "[int, float]), (\"h\", h, [int, float]), (\"nbins\", nbins, [int, float]),", "= self.parent.search( \"{} = '{}'\".format(self.parent[by].alias, column) )[self.alias].density( bandwidth=bandwidth, kernel=kernel, nbins=nbins,", "self.isdate(), ) if (is_date) and not (method == \"categorical\"): result", "import gen_colors from matplotlib.lines import Line2D colors = gen_colors() if", "the pie chart of the vColumn based on an aggregation.", "def normalize( self, method: str = \"zscore\", by: list =", "1e-99, ) fd = max(2.0 * (vColumn_075 - vColumn_025) /", ": skewness sum : sum std : standard deviation topk", "argv[2 * i + 1] ... END odd : CASE", "and (method != \"numerical\")) or not (is_numeric) or (method ==", "((is_date) and not (method == \"categorical\")) or ( method ==", "threshold: float, optional Uses the Gaussian distribution to define the", "sem See Also -------- vDataFrame.aggregate : Computes the vDataFrame input", "List of the vColumns to use to sort the data", "ax=None, **style_kwds, ) # ---# def plot( self, ts: str,", "or equal to 1\") where = \" WHERE {} IS", "cat_stats : Computes statistics of a numerical column for each", "determine if the vColumn will be considered as categorical. numcol:", "Bar Chart of the input vColumns based on an aggregation.", "https://geopandas.readthedocs.io/en/latest/docs/reference/api/ geopandas.GeoDataFrame.plot.html Returns ------- ax Matplotlib axes object \"\"\" columns", "\"{}\", cmin, \"NULLIFZERO\" if (nullifzero) else \"\", cmax, cmin, ),", "float If the vColumn type is date like (date, datetime", "Applies functions to the input vColumns. vDataFrame.applymap : Applies a", "= \"SELECT {} FROM (SELECT {} AS {}, {} AS", "make the vDataFrame \"heavier\" if it is used to compute", "import * from verticapy.toolbox import * from verticapy.errors import *", "self.alias) except: self.parent._VERTICAPY_VARIABLES_[\"exclude_columns\"] += [self.alias] if add_history: self.parent.__add_to_history__( \"[Drop]: vColumn", "* COUNT({1}) / {2} AS percent, AVG({3}{4}) AS mean, STDDEV({3}{4})", "kurtosis See Also -------- vDataFrame.aggregate : Computes the vDataFrame input", "def mode(self, dropna: bool = False, n: int = 1):", "Aggregates the vColumn using multiple statistical aggregations: min, max, median,", "The method used for the plot. gaussian : Gaussian kernel.", "if isinstance(func, str_sql): func = str(func) check_types([(\"func\", func, [str]), (\"copy_name\",", "the vColumn. Parameters ---------- func: str Function to use to", "an aggregation. Parameters ---------- method: str, optional The method to", "\"\"\" check_types( [ (\"alpha\", alpha, [int, float]), (\"use_threshold\", use_threshold, [bool]),", "the 3rd. plot_median: bool, optional If set to True, the", "+= [ (\"AVG({}) OVER (PARTITION BY {})\".format(response, \"{}\"), \"int\", \"float\")", "+ [self.max()] elif method == \"topk\": assert k >= 2,", "= {} ): self.parent, self.alias, self.transformations = ( parent, alias,", "by: str = \"\", start_date: Union[str, datetime.datetime, datetime.date] = \"\",", "the vColumns categories. Returns ------- vDataFrame self.parent See Also --------", "\"MEDIAN\": val = self.median() new_column = \"COALESCE({}, {})\".format(\"{}\", val) elif", "also be a cutomized aggregation (ex: AVG(column1) + 5). 
of:", "= method.lower() assert (method != \"cat_stats\") or (numcol), ParameterError( \"The", "category in cat: tmp_query = \"\"\"SELECT '{0}' AS 'index', COUNT({1})", "category, ) ) query += [lp + tmp_query + rp]", "using 'sem' (standard error of mean). Returns ------- float sem", "**style_kwds) # ---# def category(self): \"\"\" --------------------------------------------------------------------------- Returns the category", "str Function to use to transform the vColumn. abs :", "self.alias, self.alias, ) else: query = \"SELECT {} FROM (SELECT", "the mode) topk_percent : kth most occurent element density unique", "2.\" ) assert len(breaks) == len(labels) + 1 or not", "BY {})\".format( self.alias, \", \".join(by) ), ) if return_trans: return", "response is numerical (except ints and bools), a RF Classifier", "result = executeSQL( \"SELECT {}, MIN({}), MAX({}) FROM {} GROUP", "by, [str]), (\"start_date\", start_date, [str, datetime.datetime, datetime.date]), (\"end_date\", end_date, [str,", "= self.median() new_column = \"COALESCE({}, {})\".format(\"{}\", val) elif (len(by) ==", "nbins: int = 10): \"\"\" --------------------------------------------------------------------------- Computes the Information Value", "------- vDataFrame self.parent See Also -------- vDataFrame.astype : Converts the", "/ date, optional Input Start Date. For example, time =", "vColumn. method: dict, optional Method to use to impute the", "if (use_numbers_as_suffix) else '\"{}{}\"'.format( prefix, str(distinct_elements[k]).replace('\"', \"_\") ) ) assert", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "categorical variables.\" ) warnings.warn(warning_message, Warning) else: distinct_elements = self.distinct() expr", "the result of using previously the method on the vColumn", "1], result[i] ) trans += \" ELSE NULL END)\" trans", "avg(self): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'avg' (Average). Returns", "self.aggregate([\"min\", \"max\"]).values[self.alias] if cmax - cmin == 0: warning_message =", "\\ \\_\\\\\"\\_\\ # \\/_/ \\/_____/ \\/_____/ \\/_____/ \\/_____/ \\/_/ \\/_/", "n = 1, len(by) if self.isbool(): warning_message = \"Normalize doesn't", "was transformed using a mean encoding with {} as Response", "FROM {} LIMIT 1\".format( avg, stddev, self.parent.__genSQL__() ), print_time_sql=False, )", "Logistic kernel. sigmoid : Sigmoid kernel. 
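The 'of' parameter opening the line above is from vColumn.pie (pie_type one of auto, donut, rose; 'of' feeds aggregations such as mean or sum). A sketch:

vdf["pclass"].pie(method="density", pie_type="donut")
# 'of' is the column the aggregation is computed on:
vdf["pclass"].pie(method="mean", of="fare", pie_type="rose")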
silverman : Silverman kernel.", "\"logistic\", \"sigmoid\", \"silverman\"]), (\"bandwidth\", bandwidth, [int, float]), (\"nbins\", nbins, [float,", ") else: for k in range(max_floor): self.transformations += [(\"{}\", self.ctype(),", "cast = \"::float\" if self.category() == \"float\" else \"\" if", "the different aggregations.\", method=\"fetchall\", ) for idx, elem in enumerate(result):", "(ex: 50% for the median) prod : product range :", "converted to {}\".format( e, self.alias, dtype ) ) # ---#", "be one of the following: date / int / float", "+= [ Line2D( [0], [0], color=updated_dict(param, style_kwds, idx)[\"color\"], lw=4, ),", "self.parent.are_namecols_in(of) of = self.parent.format_colnames(of) from verticapy.plot import hist return hist(self,", "DOY / EPOCH / HOUR / ISODOW / ISOWEEK /", "\"null\": self.apply( func=\"(CASE WHEN ({} BETWEEN {} AND {}) THEN", "method: str = \"auto\", expr: str = \"\", by: list", "and the values higher than the upper bound to the", "top = float(top) self.parent.__update_catalog__( {\"index\": [\"top{}\".format(n)], self.alias: [top]} ) return", "IS NOT NULL ORDER BY {} ASC LIMIT {}\".format( self.parent.__genSQL__(),", "decrease. Returns ------- float/str median See Also -------- vDataFrame.aggregate :", ": Converts the vColumns to the input type. \"\"\" check_types([(\"dtype\",", "range_curve_vdf return range_curve_vdf( self, ts, q, start_date, end_date, plot_median, ax=ax,", "only valid if the vColumns are numerical. Optimized h will", "AS count\" \" FROM vdf_table WHERE {0} IS NOT NULL", ") elif method in (\"ffill\", \"pad\", \"bfill\", \"backfill\"): assert order_by,", "y: str Response vColumn. nbins: int, optional Maximum number of", "expression. Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].str_count :", ") method = method.lower() if method == \"auto\": pre_comp =", "if elem[0] != None else \"NULL\", elem[1] if elem[1] !=", "check_types( [ (\"by\", by, [str]), (\"method\", method, [str]), (\"of\", of,", "column in by]) ) if (by) else \"\" ) order_by_ts", "\"unique\", \"count\", \"mean\", \"std\", \"min\", \"approx_25%\", \"approx_50%\", \"approx_75%\", \"max\", ]", "See Also -------- vDataFrame.case_when : Creates a new feature by", ") if (is_date) and not (method == \"categorical\"): result =", "def fillna( self, val=None, method: str = \"auto\", expr: str", "kurtosis jb : Jarque-Bera index mad : median absolute deviation", "{}\".format( self.alias, self.alias, self.alias, self.parent.__genSQL__(), self.alias, where, ) result =", "elem[2] != None else \"NULL\", ) for elem in result", "--------------------------------------------------------------------------- Draws the Time Series of the vColumn. Parameters ----------", "the vColumn distinct elements to be used as categorical (No", "threshold: float, optional Uses the Gaussian distribution to identify outliers.", "), (\"x\", x, [int, float]), ] ) if func not", "Returns ------- vDataFrame self.parent See Also -------- vDataFrame.outliers : Computes", "alias: str vColumn alias. transformations: list, optional List of the", "(n == 1) and (self.parent[by[0]].nunique() < 50): try: result =", "to use to compute the aggregation. 
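The kernel named at the start of the line above completes vColumn.density's kernel list (gaussian, logistic, sigmoid, silverman). A sketch; per the fragments, increasing nbins refines the approximate density at the cost of performance:

vdf["age"].density(kernel="gaussian", bandwidth=1.0, nbins=200)
vdf["age"].density(by="pclass", kernel="silverman")  # one curve per category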
max_cardinality: int, optional Maximum", "to_tablesample(query, title=title) result.values[\"index\"] += [\"total\"] result.values[\"non_events\"] += [sum(result[\"non_events\"])] result.values[\"events\"] +=", "{} {} missing value{} filled.\".format( total, self.alias, conj, ) )", "0]) or self.isbool(): all_new_features = [] prefix = ( self.alias.replace('\"',", "bandwidth=bandwidth, kernel=kernel, nbins=nbins, xlim=(xmin, xmax), ax=ax, **updated_dict(param, style_kwds, idx), )", "vDataFrame expected store usage. \"\"\" pre_comp = self.parent.__get_catalog_value__(self.alias, \"store_usage\") if", "column = self.parent.format_colnames(column) columns += [column] if not (\"cmap\" in", "math, re, decimal, warnings, datetime from collections.abc import Iterable from", "aggregations: min, max, median, unique... depending on the input method.", "# \\/ # _ # \\ / _ __|_. _", "ints and bools), a RF Classifier otherwise. Example: Write {\"n_estimators\":", "new_vColumn) setattr(self.parent, name[1:-1], new_vColumn) self.parent._VERTICAPY_VARIABLES_[\"columns\"] += [name] self.parent.__add_to_history__( \"[Add Copy]:", "be able to look at the final transformation. Returns -------", "= ( self.parent.describe( method=\"numerical\", columns=[self.alias], unique=False ) .transpose() .values[self.alias] )", "self.catalog[\"count\"] = self.parent.shape()[0] self.catalog[\"percent\"] = 100 self.parent.__add_to_history__( \"[Label Encoding]: Label", "{} WHERE {} IS NOT NULL LIMIT 0\".format( func.replace(\"{}\", self.alias),", "= None if not (result) else result[0][0] if not (dropna):", "0.25 represents Q1. approx: bool, optional If set to True,", "of the response partitioned by the different vColumn categories. Parameters", "the vColumn using 'count' (Number of non-Missing elements). Returns -------", "the method 'topk'\" ) distinct = self.topk(k).values[\"index\"] trans = (", "\"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'sum'. Returns ------- float", "be picked or computed) h: float, optional Interval width of", ") self.parent[y].distinct() trans = self.discretize( method=\"same_width\" if self.isnum() else \"topk\",", "arithmetic sign sin : trigonometric sine sinh : hyperbolic sine", ": Encodes the vColumn with One-Hot Encoding. vDataFrame[].mean_encode : Encodes", ") if (by) else \"\" ) order_by_ts = \", \".join([quote_ident(column)", "self.apply(func=func) return self.parent # ---# def count(self): \"\"\" --------------------------------------------------------------------------- Aggregates", "k, [int, float]), (\"dropna\", dropna, [bool])]) topk = \"\" if", "KIND, either express or implied. # See the License for", "so it's better practice to use this method when first", "Draws the pie chart of the vColumn based on an", "\"NULLIFZERO\" if (nullifzero) else \"\", stddev ), \"float\", \"float\", )", "using 'max' (Maximum). Returns ------- float/str maximum See Also --------", "datetime.date, int, float], ), (\"plot_median\", plot_median, [bool]), ] ) self.parent.are_namecols_in(ts)", "= \"WITH vdf_table AS (SELECT * FROM {}) (SELECT AVG({})", "in order_by]) new_column = \"COALESCE({}, LAST_VALUE({} IGNORE NULLS) OVER ({}", "occurent element). 0ifnull : 0 when the vColumn is null,", "self.parent.__get_last_order_by__(), index, ) return executeSQL( query=query, title=\"Getting the vColumn element.\",", ") distinct_count, is_numeric, is_date = ( self.nunique(), self.isnum(), self.isdate(), )", "float Input number. 
Returns ------- vDataFrame self.parent See Also --------", "vColumn. Returns ------- str/float/int vColumn nth most occurent element. See", "is part of the DB version you are using. Returns", "[ (\"prefix\", prefix, [str]), (\"prefix_sep\", prefix_sep, [str]), (\"drop_first\", drop_first, [bool]),", "float]), (\"response\", response, [str]), (\"nbins\", nbins, [int, float]), ( \"method\",", "absolute deviation max : maximum mean : average median :", "After normalizing the data (Z-Score), if the absolute value of", ": Parent of the vColumn. transformations, str : List of", "None else \"NULL\", elem[1] if elem[1] != None else \"NULL\",", "int]), ] ) if by: self.parent.are_namecols_in(by) by = self.parent.format_colnames(by) columns", "Modules import math, re, decimal, warnings, datetime from collections.abc import", "encoding with {} as Response Column.\".format( self.alias, response ) )", "self.alias return tail # ---# def isbool(self): \"\"\" --------------------------------------------------------------------------- Returns", "self.alias, result[\"avg\"][0], result[\"std\"][0], threshold ) ) else: p_alpha, p_1_alpha =", ": Uses only categorical aggregations during the computation. cat_stats :", "very useful for testing to be able to look at", "if len(all_new_features) > 1 else \" was \" self.parent.__add_to_history__( \"[Get", "compute the best splits when 'method' is set to 'smart'.", "the data\" ) desc = \"\" if (method in (\"ffill\",", "ax Matplotlib axes object See Also -------- vDataFrame[].hist : Draws", "trans = ( \"'[' || FLOOR({} / {}) * {}", "a time series rule. Parameters ---------- length: int Slice size.", "a ‘pipeline’ mechanism to sequentialize # data transformation operations, and", "{} as Response Column.\".format( self.alias, response ) ) if verticapy.options[\"print_info\"]:", "self.describe(method=\"categorical\", max_cardinality=k) # ---# def var(self): \"\"\" --------------------------------------------------------------------------- Aggregates the", "= 0 self.catalog[\"mad\"] = 1 / 1.4826 elif method ==", "to return. dropna: bool, optional If set to True, NULL", "to 'smart'. return_enum_trans: bool, optional Returns the transformation instead of", "# # Unless required by applicable law or agreed to", "in (\"same_width\", \"auto\"): if not (h) or h <= 0:", "Rounds the vColumn by keeping only the input number of", "{}, MIN({}), MAX({}) FROM {} GROUP BY {}\".format( by[0], self.alias,", "] ), ) cmax = \"DECODE({}, {}, NULL)\".format( by[0], \",", "[lp + tmp_query + rp] query = \"WITH vdf_table AS", "\"float\", \"float\" elif method == \"0ifnull\": category, ctype = \"int\",", "pass elif math.isnan(result[i][2]): result[i][2] = None avg = \"DECODE({}, {},", "value{} filled.\".format( total, self.alias, conj, ) ) else: if verticapy.options[\"print_info\"]:", "partition the data. h: float, optional Interval width if the", "\"(SELECT {} FROM {}{} OFFSET {}{}) VERTICAPY_SUBTABLE\".format( self.alias, self.parent.__genSQL__(), self.parent.__get_last_order_by__(),", ": Drops the vColumn missing values. \"\"\" if isinstance(by, str):", "not (schema): schema = \"public\" name = gen_tmp_name(schema=schema, name=\"kde\") if", "warnings.warn(warning_message, Warning) return self elif (n == 1) and (self.parent[by[0]].nunique()", "using the input expression. 
\"\"\" self.parent.filter(\"{} IS NOT NULL\".format(self.alias)) return", "BY {})\".format(response, \"{}\"), \"int\", \"float\") ] self.parent.__update_catalog__(erase=True, columns=[self.alias]) self.parent.__add_to_history__( \"[Mean", "List of the different records. For example, to check if", "if isinstance(cat_priority, str) or not (isinstance(cat_priority, Iterable)): cat_priority = [cat_priority]", "VERTICAPY_SUBTABLE\".format( self.alias, self.parent.__genSQL__(), self.parent.__get_last_order_by__(), index_start, limit, ) return vDataFrameSQL(query) elif", "import bar return bar(self, method, of, max_cardinality, nbins, h, ax=ax,", "import sys total = ( sys.getsizeof(self) + sys.getsizeof(self.alias) + sys.getsizeof(self.transformations)", "case of discretization using the method 'smart'.\" ) assert response,", "OVER (PARTITION BY {})\".format( self.alias, \", \".join(by) ), ) if", "sliced using the floor of the slicing instead of the", "(SELECT * FROM {}) {}\".format( self.parent.__genSQL__(), \" UNION ALL \".join(query)", "Also -------- vDataFrame[].decode : Encodes the vColumn with user defined", "a vColumn can make the vDataFrame \"heavier\" if it is", "as an outlier. use_threshold: bool, optional Uses the threshold instead", "list, optional vColumns used in the partition. return_trans: bool, optimal", "def __nonzero__(self): return self.count() > 0 # ---# def __repr__(self):", "occurent element percent : percent of non-missing elements q% :", "- min) / (max - min) by: list, optional vColumns", "start_date, [str, datetime.datetime, datetime.date, int, float], ), ( \"end_date\", end_date,", "= model.fit(self.parent.__genSQL__(), [self.alias]).plot( ax=ax, **style_kwds ) model.drop() return result except:", "THEN 0 ELSE (pt_non_events - pt_events) * ZEROIFNULL(LN(pt_non_events / NULLIFZERO(pt_events)))", "self.count()] + [item[1] for item in query_result] index = [\"unique\",", "is used to compute other vColumns. Parameters ---------- add_history: bool,", "input vColumn. \"\"\" check_types([(\"x\", x, [int, float])]) return self.apply(func=\"{} *", "the vDataFrame input aggregations. \"\"\" return self.aggregate([\"stddev\"]).values[self.alias][0] stddev = std", "\"index\": [item[0] for item in result], \"count\": [int(item[1]) for item", "RandomForestClassifier, RandomForestRegressor, ) drop(tmp_model_name, method=\"model\") if self.parent[response].category() == \"float\": model", "in kwargs: query = \"SELECT {} AS {} FROM {}", "was added to the vDataFrame.\".format( self.alias, name ) ) return", "import Iterable from typing import Union # VerticaPy Modules import", "APPROXIMATE_PERCENTILE({} USING PARAMETERS percentile = 0.75) AS Q3, MAX({}) AS", "response = self.parent.format_colnames(response) drop(tmp_view_name, method=\"view\") self.parent.to_db(tmp_view_name) from verticapy.learn.ensemble import (", "unit: str = \"second\", start: bool = True): \"\"\" ---------------------------------------------------------------------------", "approx_unique : approximative cardinality count : number of non-missing elements", "if (self.parent[numcol].isbool()) else \"\" query, cat = [], self.distinct() if", "model = RandomForestClassifier(tmp_model_name) model.set_params({\"n_estimators\": 20, \"max_depth\": 8, \"nbins\": 100}) model.set_params(RFmodel_params)", "/ ISOWEEK / ISOYEAR / MICROSECONDS / MILLENNIUM / MILLISECONDS", "the vDataFrame where the vColumn is missing. 
Returns ------- vDataFrame", "= self.parent[by].distinct() for idx, column in enumerate(columns): param = {\"color\":", "h: float = 0, nbins: int = -1, k: int", "category)] self.parent[copy_name].catalog = self.catalog self.parent.__add_to_history__( \"[Apply]: The vColumn '{}' was", "len(labels) + 1 or not (labels), ParameterError( \"Length of parameter", "Returns the distinct categories of the vColumn. Returns ------- list", "= self.count() if method not in [\"mode\", \"0ifnull\"]: max_floor =", "a cutomized aggregation (ex: AVG(column1) + 5). of: str, optional", "Also -------- vDataFrame[].tail : Returns the a part of the", "NULL \".format(self.alias) if (dropna) else \" \" result = executeSQL(", "lower=None, upper=None): \"\"\" --------------------------------------------------------------------------- Clips the vColumn by transforming the", "(/_| | |(_(_|| \\/ # / # VerticaPy is a", "self.transformations[-1][2] # ---# def clip(self, lower=None, upper=None): \"\"\" --------------------------------------------------------------------------- Clips", "parameter 'breaks' must be greater or equal to 2.\" )", "to use a mean encoding\" ) max_floor = len(self.parent[response].transformations) -", "result = [distinct_count, self.count()] + [item[1] for item in query_result]", "of, [str]), (\"max_cardinality\", max_cardinality, [list]), (\"h\", h, [list, float, int]),", "(SELECT split_value, MAX(weighted_information_gain) FROM ({}) VERTICAPY_SUBTABLE WHERE split_value IS NOT", "AS max FROM {}\".format( self.alias, self.alias, self.alias, self.alias, self.alias, table", "in kwargs: column = kwargs[\"column\"] else: check = False if", "(method == \"categorical\") ): query = \"\"\"(SELECT {0} || '',", "method=\"view\") drop(tmp_model_name, method=\"model\") result = [self.min()] + result + [self.max()]", "the vColumn. Parameters ---------- ts: str TS (Time Series) vColumn", "\\ \\_____\\ \\ \\_____\\ \\ \\_____\\ \\ \\_\\ \\ \\_\\", "result}) for elem in values: for i in range(len(values[elem])): if", "function to the input vColumn. \"\"\" return self.apply(func=\"ABS({})\") # ---#", "time series. \"\"\" check_types( [ (\"ts\", ts, [str]), (\"q\", q,", "(\"bandwidth\", bandwidth, [int, float]), (\"nbins\", nbins, [float, int]), ] )", "try: if \"count\" in sauv: self.catalog[\"count\"] = sauv[\"count\"] self.catalog[\"percent\"] =", "\"SELECT {}, AVG({}), STDDEV({}) FROM {} GROUP BY {}\".format( by[0],", "otherwise. categorical : Uses only categorical aggregations during the computation.", "VerticaPy aims to do all of the above. The idea", "transformations=transformations, catalog={ \"min\": 0, \"max\": 1, \"count\": self.parent.shape()[0], \"percent\": 100.0,", "are in the vDataFrame. \"\"\" if isinstance(val, str) or not", "{}. For example to apply the function: x -> x^2", "return self.apply(func=\"TIMESTAMPADD(SECOND, -({}), {})\".format(x, \"{}\")) else: return self.apply(func=\"{} - ({})\".format(\"{}\",", "cardinality count : number of non-missing elements cvar : conditional", "set to True, draw an Area Plot. step: bool, optional", "query=query, title=\"Computing the descriptive statistics of {}.\".format(self.alias), method=\"fetchall\", ) result", "raised to the power of another number round : rounds", "to get the median). It can also be a cutomized", "have multiple children vColumns whereas one vColumn can only have", "must be a vDataFrame column if the method is 'cat_stats'\"", "lower and upper ZScores. 
threshold: float, optional Uses the Gaussian", "optional Method to use to compute the optimal h. auto", "{} was renamed '{}'.\".format(old_name, new_name) ) return parent # ---#", "\"\" return self.aggregate(func=[prefix + \"{}%\".format(x * 100)]).values[self.alias][ 0 ] #", "vColumn elements. \"\"\" check_types([(\"n\", n, [int, float])]) query = \"SELECT", "computation. cat_stats : Computes statistics of a numerical column for", "normalize. zscore : Normalization using the Z-Score (avg and std).", "Methods # # ---# def __init__( self, alias: str, transformations:", "new_vColumn = vColumn( name, parent=self.parent, transformations=[item for item in self.transformations],", "return self elif n == 1: try: result = executeSQL(", "2, \"approx_unique\": 2, \"prod\": 0, }, ) setattr(self.parent, name, new_vColumn)", "quantile). See Also -------- vDataFrame.aggregate : Computes the vDataFrame input", "Extracts a specific TS field from the vColumn. \"\"\" check_types(", "self.parent.describe( method=\"numerical\", columns=[self.alias], unique=False ) .transpose() .values[self.alias] ) result =", "to use to aggregate the data. count : Number of", "*args, **kwargs): \"\"\" --------------------------------------------------------------------------- Draws the Geospatial object. Parameters ----------", "self.catalog[elem] = (sauv[elem] - sauv[\"mean\"]) / sauv[ \"std\" ] elif", "data. The vColumn type must be date like (date, datetime,", "self.parent, self.alias, self.transformations = ( parent, alias, [elem for elem", "\"robust_zscore\": if n > 0: warning_message = \"The method 'robust_zscore'", "\"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'count' (Number of non-Missing", "the vColumn. vDataFrame[].tail : Returns the tail of the vColumn.", "--------------------------------------------------------------------------- Aggregates the vColumn using 'count' (Number of non-Missing elements).", "------- str vColumn category. See Also -------- vDataFrame[].ctype : Returns", "\"Normalize doesn't work on booleans\".format(self.alias) warnings.warn(warning_message, Warning) elif self.isnum(): if", "= \"Can not normalize {} using a Robust Z-Score -", "list of at least one element to use to order", "the Random Forest model parameters used to compute the best", "str): \"\"\" --------------------------------------------------------------------------- Converts the vColumn to the input type.", "encoding. vDataFrame[].discretize : Discretizes the vColumn. vDataFrame[].label_encode : Encodes the", "approximative cardinality count : number of non-missing elements cvar :", "--------------------------------------------------------------------------- Converts the vColumn to the input type. Parameters ----------", "ORDER BY count DESC\" ).format(self.alias, max_cardinality + 1) query =", "= index_stop - index_start if limit <= 0: limit =", "def numh(self, method: str = \"auto\"): \"\"\" --------------------------------------------------------------------------- Computes the", "discretization using the method 'smart'.\" ) assert response, ParameterError( \"Parameter", "of \" \"the parameters ('prefix', 'prefix_sep'), you'll be able to", "method. 
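The threshold parameter opening the line above belongs to the outlier helpers (drop_outliers / fill_outliers): with use_threshold=True, rows whose |Z-score| exceeds 'threshold' are treated as outliers; otherwise 'alpha' trims both tails by quantile. A sketch:

vdf["fare"].drop_outliers(threshold=4.0, use_threshold=True)
vdf["fare"].drop_outliers(use_threshold=False, alpha=0.01)  # keep ~[1%, 99%]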
RFmodel_params: dict, optional Dictionary of the Random Forest model", "== 1: try: result = executeSQL( \"SELECT {}, MIN({}), MAX({})", "fd = max(2.0 * (vColumn_075 - vColumn_025) / (count) **", "or equal to 2.\" ) assert len(breaks) == len(labels) +", "Also -------- vDataFrame[].describe : Computes the vColumn descriptive statistics. \"\"\"", "\\ \\ \\____ \\ \\ \\_\\ \\ \\ \\ \\-./\\", "cardinality (count distinct) var : variance Other aggregations could work", "/ # _____________ / / # \\ / / /", "-------- vDataFrame[].apply : Applies a function to the input vColumn.", "of: self.parent.are_namecols_in(of) of = self.parent.format_colnames(of) from verticapy.plot import pie return", "Donut chart. rose : Rose chart. It can also be", "20 trees and a maximum depth of 10. response: str,", "in copy_trans] for elem in sauv: self.catalog[elem] = sauv[elem] return", "\"NULL\" if (elem[1] == None) else str(elem[1]) new_column = \"COALESCE({},", "MILLENNIUM / MILLISECONDS / MINUTE / MONTH / QUARTER /", "self.parent.__update_catalog__( {\"index\": [\"store_usage\"], self.alias: [store_usage]} ) return store_usage # ---#", "threshold, it will be considered as an outlier. use_threshold: bool,", "Silverman kernel. nbins: int, optional Maximum number of points to", "---------- x: float If the vColumn type is date like", "input aggregations. \"\"\" check_types([(\"approx\", approx, [bool])]) if approx: return self.aggregate(func=[\"approx_unique\"]).values[self.alias][0]", "ORDER BY {}\".format( bin_spatial_to_str(self.category(), self.alias), self.alias, self.parent.__genSQL__(), self.alias, self.alias, self.alias,", "deleted from the vDataFrame.\".format(self.alias) ) return parent # ---# def", "n > 0: warning_message = \"The method 'robust_zscore' is available", "and method == \"same_freq\") or ( self.isnum() and method ==", "# ---# def abs(self): \"\"\" --------------------------------------------------------------------------- Applies the absolute value", "WOE & IV of {} (response = {}).\".format(self.alias, y) result", "vColumn_075, vColumn_max = result sturges = max( float(vColumn_max - vColumn_min)", "vDataFrame self.parent See Also -------- vDataFrame.outliers : Computes the vDataFrame", "= self.aggregate([\"min\", \"max\"]).values[self.alias] if cmax - cmin == 0: warning_message", "self.catalog[elem] = None elif method == \"robust_zscore\": self.catalog[elem] = (sauv[elem]", "str): by = [by] check_types( [ (\"method\", method, [\"zscore\", \"robust_zscore\",", "bins.\", method=\"fetchall\", ) result = [elem[0] for elem in result]", "\"\"\" --------------------------------------------------------------------------- Renames the vColumn by dropping the current vColumn", "optional Prefix delimitor of the dummies. drop_first: bool, optional Drops", "in kwargs): from verticapy.plot import gen_cmap kwargs[\"cmap\"] = gen_cmap()[0] else:", "int, optional Number of bins. If empty, an optimized number", "new_name) ) return parent # ---# def round(self, n: int):", "(n being the vColumn cardinality). Returns ------- vDataFrame self.parent See", "------- vDataFrame self.parent See Also -------- vDataFrame.case_when : Creates a", "or ( method == \"is_numeric\" ): self.parent.__update_catalog__({\"index\": index, self.alias: result})", "on the input method. Parameters ---------- method: str, optional The", "math.isnan(result[i][2]): result[i][2] = None avg = \"DECODE({}, {}, NULL)\".format( by[0],", "---------- alias, str : vColumn alias. 
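RFmodel_params, opening the line above, tunes the one-column random forest that vColumn.discretize(method='smart') fits against a response column to choose split points; the docstring's own example is 20 trees of depth 10. A sketch (the response name is a placeholder):

vdf["age"].discretize(
    method="smart",
    response="survived",
    nbins=6,
    RFmodel_params={"n_estimators": 20, "max_depth": 10},
)
vdf["fare"].discretize(method="same_freq", nbins=4)  # equal-count bins instead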
catalog, dict : Catalog", "self.parent.aggregate(func=func, columns=[self.alias]).transpose() agg = aggregate # ---# def apply(self, func:", "\".join([quote_ident(column) for column in by]) ) if (by) else \"\"", "\"SELECT {} AS index, non_events, events, pt_non_events, pt_events, CASE WHEN", "warnings.warn(warning_message, Warning) return self.parent if isinstance(val, str): val = val.replace(\"'\",", "or self.isdate(), TypeError( \"cut only works on numerical / date-like", "\"\"\" columns = [self.alias] check = True if len(args) >", "\"\" for category in cat: tmp_query = \"\"\"SELECT '{0}' AS", "object. Parameters ---------- *args / **kwargs Any optional parameter to", "vColumn to the input type. Parameters ---------- dtype: str New", "vColumn. \"\"\" check_types([(\"x\", x, [int, float])]) if self.isdate(): return self.apply(func=\"TIMESTAMPADD(SECOND,", "WHERE {} IS NOT NULL ORDER BY {} DESC LIMIT", "step: bool = False, ax=None, **style_kwds, ): \"\"\" --------------------------------------------------------------------------- Draws", "ts, q, start_date, end_date, plot_median, ax=ax, **style_kwds, ) # ---#", "vColumn. abs : absolute value acos : trigonometric inverse cosine", "result.values[\"index\"] += [\"total\"] result.values[\"non_events\"] += [sum(result[\"non_events\"])] result.values[\"events\"] += [sum(result[\"events\"])] result.values[\"pt_non_events\"]", "date like (date, datetime ...), the parameter 'x' will represent", "() FROM {} LIMIT 1\".format( alpha, self.alias, alpha, self.alias, self.parent.__genSQL__()", "vDataFrame column if the method is 'cat_stats'\" ) distinct_count, is_numeric,", "= ( \"NULL\" if (elem[0] == None) else \"'{}'\".format(str(elem[0]).replace(\"'\", \"''\"))", "= \"\", return_enum_trans: bool = False, ): \"\"\" --------------------------------------------------------------------------- Discretizes", "median( self, approx: bool = True, ): \"\"\" --------------------------------------------------------------------------- Aggregates", "/ int(math.floor(math.log(count, 2) + 2)), 1e-99, ) fd = max(2.0", "old_name = quote_ident(self.alias) new_name = new_name.replace('\"', \"\") assert not (self.parent.is_colname_in(new_name)),", "\"min\", \"approx_25%\", \"approx_50%\", \"approx_75%\", \"max\", ] if method != \"cat_stats\":", "a function to the vColumn. Parameters ---------- func: str, Function", "in result], \"count\": [int(item[1]) for item in result], \"percent\": [float(round(item[2],", "-threshold * result[\"std\"][0] + result[\"avg\"][0], threshold * result[\"std\"][0] + result[\"avg\"][0],", "\"MEDIAN\" if (method == \"median\") else \"AVG\" if by ==", "if pie_type == \"donut\" else False rose = True if", ") cmax = \"DECODE({}, {}, NULL)\".format( by[0], \", \".join( [", "self.parent except Exception as e: raise ConversionError( \"{}\\nThe vColumn {}", "AVG({}) FROM vdf_table WHERE {} > {})\".format( self.parent.__genSQL__(), self.alias, self.alias,", "available for categorical variables.\" ) warnings.warn(warning_message, Warning) else: distinct_elements =", "For example, if n = 1 then this method will", "transformed. Parameters ---------- pat: str Regular expression. 
Returns ------- vDataFrame", "step, [int, float])]) return self.apply(func=\"SUBSTR({}, {}, {})\".format(\"{}\", start, step)) #", "(nullifzero) else \"\", stddev ) else: final_transformation = [ (", "Tuple including the 2 quantiles used to draw the Plot.", "a method in zscore|minmax\" warnings.warn(warning_message, Warning) return self mad, med", "while total < int(float(count / int(nbins))) * int(nbins): nth_elems +=", "to [0, n - 1] (n being the vColumn cardinality).", "for elem in sauv: if \"top\" in elem: if \"percent\"", "product range : difference between the max and the min", "The type of pie chart. auto : Regular pie chart.", "\"mod\", \"pow\", \"round\", \"sign\", \"sin\", \"sinh\", \"sqrt\", \"tan\", \"tanh\", ],", "the vDataFrame input aggregations. \"\"\" return self.aggregate(func=[\"prod\"]).values[self.alias][0] prod = product", "cut the vColumn. labels: list, optional Labels used to name", "END)\".format( \"{}\", p_alpha, mean_alpha, \"{}\", p_1_alpha, mean_1_alpha, \"{}\" ) )", "vColumn is numerical, False otherwise. Returns ------- bool True if", "bin_spatial_to_str(self.category()), new_category.replace(\"'\", \"''\"), ), \"varchar\", \"text\", ) elif self.isnum() and", "self.parent.are_namecols_in(response) response = self.parent.format_colnames(response) drop(tmp_view_name, method=\"view\") self.parent.to_db(tmp_view_name) from verticapy.learn.ensemble import", "(Maximum). Returns ------- float/str maximum See Also -------- vDataFrame.aggregate :", "+ 2 use \"POWER({}, 2) + 2\". copy_name: str, optional", "or ( re.search( re.compile(\"\\\\b{}\\\\b\".format(column.replace('\"', \"\"))), func ) ): max_floor =", "{} || '' ELSE '{}' END)\".format( bin_spatial_to_str(self.category()), \", \".join( [", "if by: self.parent.are_namecols_in(by) by = self.parent.format_colnames(by) from verticapy.plot import ts_plot", "the vColumn (delimited by an offset and a limit). Parameters", "optional Number of elements to display. offset: int, optional Number", "\"auto\"])] ) method = method.lower() if method == \"auto\": pre_comp", "aggregation (ex: AVG(column1) + 5). ax: Matplotlib axes object, optional", "start, [int, float]), (\"step\", step, [int, float])]) return self.apply(func=\"SUBSTR({}, {},", "Average. median : median. mode : mode (most occurent element).", "= \"::float\" if self.category() == \"float\" else \"\" if index", "\"int\")] new_vColumn = vColumn( name, parent=self.parent, transformations=transformations, catalog={ \"min\": 0,", "elem in by: if len(self.parent[elem].transformations) > max_floor: max_floor = len(self.parent[elem].transformations)", "useful for testing to be able to look at the", "if (self.isnum() and method == \"same_freq\") or ( self.isnum() and", "down to the next whole number ln : natural logarithm", "trans = \"(CASE \" for i in range(1, n): trans", "} if ((is_date) and not (method == \"categorical\")) or (", "[list]), ] ) method = method.lower() self.parent.are_namecols_in([elem for elem in", "the discretization (must be > 1) k: int, optional The", "intervals should be closed. If set to True, the intervals", "--------------------------------------------------------------------------- Discretizes the vColumn using the input method. Parameters ----------", "in self.catalog: sauv[elem] = self.catalog[elem] self.parent.__update_catalog__(erase=True, columns=[self.alias]) total = abs(self.count()", "should be closed. 
If set to True, the intervals will", "USING PARAMETERS percentile = 0.1) AS 'approx_10%', APPROXIMATE_PERCENTILE ({3}{4} USING", "the vColumn. vDataFrame[].str_slice : Slices the vColumn. \"\"\" check_types([(\"to_replace\", to_replace,", "assert nbins >= 2, ParameterError( \"Parameter 'nbins' must be greater", "list. Parameters ---------- breaks: list List of values used to", "expression. \"\"\" check_types([(\"name\", name, [str])]) name = quote_ident(name.replace('\"', \"_\")) assert", "# ---# def fillna( self, val=None, method: str = \"auto\",", "def str_count(self, pat: str): \"\"\" --------------------------------------------------------------------------- Computes the number of", "Method to use to compute the optimal h. auto :", "self.aggregate([\"stddev\"]).values[self.alias][0] stddev = std # ---# def store_usage(self): \"\"\" ---------------------------------------------------------------------------", "pure SQL used to transform the vColumn. The function variable", "and it returns the new vDataFrame of the search. Parameters", "axes object See Also -------- vDataFrame.bar : Draws the Bar", "h will be computed. pie_type: str, optional The type of", "else \"NULL\", ) for elem in result if elem[2] !=", "for column in self.parent._VERTICAPY_VARIABLES_[\"columns\"] ] force_columns.remove(self.alias) executeSQL( \"SELECT * FROM", "not be empty in case of discretization using the method", "work on booleans\".format(self.alias) warnings.warn(warning_message, Warning) elif self.isnum(): if method ==", "vDataFrame, optional Parent of the vColumn. One vDataFrame can have", "then this method will return the mode of the vColumn.", "try: result = model.fit(self.parent.__genSQL__(), [self.alias]).plot( ax=ax, **style_kwds ) model.drop() return", "most frequent categories and merge the other into one unique", "\"[\" elif idx == 0: op1, close_l = \"<\", \"]\"", "computed) nbins: int, optional Number of bins. If empty, an", "func = \"(CASE {}{}ELSE {} END)\".format(lower_when, upper_when, \"{}\") self.apply(func=func) return", "{}\".format( self.parent.__genSQL__(), \" UNION ALL \".join(query) ) title = \"Describes", "alpha: float, optional Number representing the outliers threshold. Values lesser", "as categorical (No h will be picked or computed) h:", "statistics. \"\"\" check_types([(\"k\", k, [int, float]), (\"dropna\", dropna, [bool])]) topk", "to True, the Median will be drawn. ax: Matplotlib axes", "\"SELECT {} FROM {} LIMIT 1\".format( new_column.format(self.alias), self.parent.__genSQL__() ), print_time_sql=False,", "\"\" query = \"(SELECT {} FROM {}{} OFFSET {}{}) VERTICAPY_SUBTABLE\".format(", "= abs(self.count() - total) except Exception as e: self.transformations =", "self.mode(dropna=True) if val == None: warning_message = \"The vColumn {}", "\"\", bandwidth: float = 1.0, kernel: str = \"gaussian\", nbins:", "IS NOT NULL GROUP BY {} ORDER BY {}\".format( bin_spatial_to_str(self.category(),", "supports the # entire data science life cycle, uses a", "(date, datetime, timestamp...) or numerical. 
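The interval-closing flags here are from vColumn.cut; per the assertions quoted nearby, 'breaks' needs at least two values and, when labels are given, len(breaks) == len(labels) + 1. A sketch:

vdf["age"].cut(
    breaks=[0, 18, 65, 120],
    labels=["child", "adult", "senior"],
    include_lowest=True,  # lowest interval becomes [0;18]
    right=True,           # intervals closed on the right: ]18;65]
)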
by: str, optional vColumn to", "__ __ __ __ # /\\ \\ / / /\\", "(\"alpha\", alpha, [int, float]), (\"use_threshold\", use_threshold, [bool]), (\"threshold\", threshold, [int,", "= self.parent.format_colnames(of) from verticapy.plot import bar return bar(self, method, of,", "columns=[self.alias], unique=False ) .transpose() .values[self.alias] ) count, vColumn_min, vColumn_025, vColumn_075,", "---# def isnum(self): \"\"\" --------------------------------------------------------------------------- Returns True if the vColumn", "expr) elif method == \"0ifnull\": new_column = \"DECODE({}, NULL, 0,", "the func 'x -> {}'.\".format( self.alias.replace('\"', \"\"), func.replace(\"{}\", \"x\"), )", "if the vColumn is numerical. \"\"\" return self.ctype().lower() in (\"bool\",", "= method.lower() check_types( [ (\"method\", method, [\"winsorize\", \"null\", \"mean\"]), (\"alpha\",", "# # Standard Python Modules import math, re, decimal, warnings,", "(\"freedman_diaconis\", \"fd\"): best_h = fd else: best_h = max(sturges, fd)", "result.values[\"pt_events\"] += [\"\"] result.values[\"woe\"] += [\"\"] result.values[\"iv\"] += [sum(result[\"iv\"])] return", "self.alias, query, ) query = \"SELECT {} AS index, non_events,", "h, [int, float]), (\"pie_type\", pie_type, [\"auto\", \"donut\", \"rose\"]), ] )", "NULL)\".format( by[0], \", \".join( [ \"{}, {}\".format( \"'{}'\".format(str(elem[0]).replace(\"'\", \"''\")) if", "\"SELECT {}{} FROM {}{} OFFSET {} LIMIT 1\".format( self.alias, cast,", "log10 : base 10 logarithm mod : remainder of a", "# VerticaPy Modules import verticapy from verticapy.utilities import * from", "Percentage of the distribution. mean : Average of the vColumn", "kwargs[\"figsize\"] = (14, 10) return self.parent[columns].to_geopandas(self.alias).plot(*args, **kwargs) # ---# def", "for i in range(len(values[elem])): if isinstance(values[elem][i], decimal.Decimal): values[elem][i] = float(values[elem][i])", "datetime.datetime, datetime.date] = \"\", end_date: Union[str, datetime.datetime, datetime.date] = \"\",", "use to evaluate the approximate density function. Increasing this parameter", "boxplot( self, by: str = \"\", h: float = 0,", "Start Date. For example, time = '03-11-1993' will filter the", "the n largest vColumn elements. \"\"\" check_types([(\"n\", n, [int, float])])", "[list]), (\"return_trans\", return_trans, [bool]), ] ) method = method.lower() self.parent.are_namecols_in(by)", "method=\"fetchfirstelem\", ) else: return getattr(self, index) # ---# def __len__(self):", "h: float = 0, ax=None, **style_kwds, ): \"\"\" --------------------------------------------------------------------------- Draws", "isinstance(method, str): method = method.lower() check_types( [ (\"method\", method, [\"winsorize\",", "(result) else result[0][0] if not (dropna): n = \"\" if", "BY _verticapy_cnt_ DESC LIMIT {}) VERTICAPY_SUBTABLE ORDER BY _verticapy_cnt_ ASC", "else: label = f\"{close_l}{first_elem};{second_elem}{close_r}\" conditions += [ f\"'{first_elem}' {op1} {column}", "by]) ) if (by) else \"\" ) order_by_ts = \",", "and creating a copy with the specified name. \\u26A0 Warning", "return vDataFrameSQL(query) elif isinstance(index, int): cast = \"::float\" if self.category()", "Slices the vColumn. 
\"\"\" check_types([(\"pat\", pat, [str])]) return self.apply( func=\"REGEXP_COUNT({},", "> max_cardinality: query += ( \"UNION ALL (SELECT 'Others', SUM(count)", "WHEN \".join(conditions) + \" END\" self.apply(func=expr) # ---# def ctype(self):", ") except: new_column = \"COALESCE({}, {}({}) OVER (PARTITION BY {}))\".format(", "Aggregates the vColumn using 'median'. Parameters ---------- approx: bool, optional", "{}\".format( bin_spatial_to_str(self.category(), self.alias), self.alias, self.parent.shape()[0], self.parent.__genSQL__(), dropna, self.alias, topk, )", "events, non_events / NULLIFZERO(SUM(non_events) OVER ()) AS pt_non_events, events /", "100 * COUNT({1}) / {2} AS percent, AVG({3}{4}) AS mean,", "date, optional Input End Date. For example, time = '03-11-1993'", "50% for the median) prod : product range : difference", "{}, \"regr_slope\": {}, \"regr_sxx\": {}, \"regr_sxy\": {}, \"regr_syy\": {}, }", "expression. by: list, optional vColumns used in the partition. order_by:", "= MIN !\".format( self.alias ) warnings.warn(warning_message, Warning) return self elif", "0 is forbidden !\") return self.apply(func=\"{} / ({})\".format(\"{}\", x)) #", "of = self.parent.format_colnames(of) from verticapy.plot import hist return hist(self, method,", "= 0 self.catalog[\"max\"] = 1 self.parent.__add_to_history__( \"[Normalize]: The vColumn '{}'", "self.count() nb = int(float(count / int(nbins))) assert nb != 0,", "by[0], fun, self.alias, self.parent.__genSQL__(), by[0] ) result = executeSQL( query,", "optional If set to True, draw an Area Plot. step:", "pat: str regular expression. Returns ------- vDataFrame self.parent See Also", "transformations. Each transformation must be similar to the following: (function,", "\"\"\" --------------------------------------------------------------------------- Encodes the vColumn using a bijection from the", "float]), (\"numcol\", numcol, [str]), ] ) method = method.lower() assert", "NULL GROUP BY 1 ORDER BY 2 DESC LIMIT {})", "is null, 1 otherwise. expr: str, optional SQL expression. 
by:", "__ __ # /\\ \\ / / /\\ ___\\ /\\", "\"END\" return self.apply( func=\"TIME_SLICE({}, {}, '{}', '{}')\".format( \"{}\", length, unit.upper(),", "able to solve this \" \"issue.\" ) for k in", "__ __ __ __ __ # /\\ \\ / /", "\"boolean\") # ---# def isdate(self): \"\"\" --------------------------------------------------------------------------- Returns True if", "{} IS NOT NULL) VERTICAPY_SUBTABLE {}\".format( self.alias, self.alias, self.alias, self.parent.__genSQL__(),", "category, ctype = \"float\", \"float\" elif method == \"0ifnull\": category,", "check_types([(\"k\", k, [int, float]), (\"dropna\", dropna, [bool])]) topk = \"\"", "**kwargs): \"\"\" --------------------------------------------------------------------------- Returns the distinct categories of the vColumn.", "------- float kurtosis See Also -------- vDataFrame.aggregate : Computes the", ") result = executeSQL( query, title=\"Computing the different aggregations.\", method=\"fetchall\",", "{}\".format( bin_spatial_to_str(self.category(), self.alias), self.alias, self.parent.__genSQL__(), self.parent.__get_last_order_by__(), limit, offset, ), title=title,", "\"cbrt\", \"ceil\", \"cos\", \"cosh\", \"cot\", \"exp\", \"floor\", \"ln\", \"log\", \"log10\",", "\"<\", \"]\" if labels: label = labels[idx] else: label =", "WHERE {} IS NOT NULL) VERTICAPY_SUBTABLE {}\".format( self.alias, self.alias, self.alias,", "= [elem for elem in copy_trans] raise QueryError(\"{}\\nAn Error happened", "elif method == \"robust_zscore\": self.catalog[elem] = (sauv[elem] - sauv[\"approx_50%\"]) /", "[str]), (\"q\", q, [tuple]), ( \"start_date\", start_date, [str, datetime.datetime, datetime.date,", "* 1.01 / nbins if h > 0.01: h =", "\\-. \\ # \\ \\__| \\ \\_____\\ \\ \\_____\\ \\", "h will be computed. ax: Matplotlib axes object, optional The", "columns=[self.alias]) try: if \"count\" in sauv: self.catalog[\"count\"] = sauv[\"count\"] self.catalog[\"percent\"]", "= 0, nbins: int = -1, k: int = 6,", "\"int\") else \"\" if (h > 1) or (self.category() ==", "True, use_numbers_as_suffix: bool = False, ): \"\"\" --------------------------------------------------------------------------- Encodes the", "self.parent.format_colnames(by) from verticapy.plot import boxplot return boxplot(self, by, h, max_cardinality,", "number of seconds, otherwise it will represent a number. Returns", "drop_first else 0 for k in range(len(distinct_elements) - n): name", "\"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'max' (Maximum). Returns -------", "method: str = \"zscore\", by: list = [], return_trans: bool", "--------------------------------------------------------------------------- Renames the vColumn by dropping the current vColumn and", "the vColumn. vDataFrame[].str_replace : Replaces the regular expression matches in", "column = [], self.alias for idx in range(len(breaks) - 1):", "name.replace('\"', \"\"), EmptyParameter( \"The parameter 'name' must not be empty\"", "could work if it is part of the DB version", "NULLIFZERO(pt_events))) END AS iv FROM ({}) x ORDER BY ord\".format(", "---------- k: int, optional Number of most occurent elements to", "{}\".format( by[0], self.alias, self.alias, self.parent.__genSQL__(), by[0], ), title=\"Computing the different", "= by if method in [\"ffill\", \"pad\", \"bfill\", \"backfill\"]: all_partition", "{}::{} AS {} FROM {} WHERE {} IS NOT NULL", "the vColumn. 
\"\"\" check_types([(\"to_replace\", to_replace, [str]), (\"value\", value, [str])]) return", "is numerical (except ints and bools), a RF Classifier otherwise.", "response partitioned by the different vColumn categories. Parameters ---------- response:", "title = \"Describes the statics of {} partitioned by {}.\".format(", "matches for the regular expression in each record of the", "values[elem][i] = float(values[elem][i]) return tablesample(values) # ---# def discretize( self,", "class vColumn(str_sql): \"\"\" --------------------------------------------------------------------------- Python object which that stores all", "column 'numcol' must be numerical\" ) cast = \"::int\" if", "text_info += \"\\t{} => {}\".format(distinct_elements[k], k) expr = \", \".join(expr)", "**style_kwds, ): \"\"\" --------------------------------------------------------------------------- Draws the bar chart of the", "information, see utilities.tablesample. See Also -------- vDataFrame[].describe : Computes the", "return self.parent # ---# def fill_outliers( self, method: str =", "return self.parent except Exception as e: raise ConversionError( \"{}\\nThe vColumn", "object See Also -------- vDataFrame.plot : Draws the time series.", "(\"dropna\", dropna, [bool])]) topk = \"\" if (k < 1)", "vColumn with One-Hot Encoding. \"\"\" check_types([(\"response\", response, [str])]) self.parent.are_namecols_in(response) response", "November 1993 the 3rd. end_date: str / date, optional Input", "the method 'same_freq'\" ) count = self.count() nb = int(float(count", "ELSE NULL END)\" trans = (trans, \"varchar\", \"text\") if return_enum_trans:", "computation. n: int, optional Integer corresponding to the offset. For", "is set to 'smart'. A RF Regressor will be trained", "from verticapy.toolbox import * from verticapy.errors import * ## #", "list = [], return_trans: bool = False ): \"\"\" ---------------------------------------------------------------------------", "= [], parent=None, catalog: dict = {} ): self.parent, self.alias,", "{} has no mode (only missing values).\\nNothing was filled.\".format( self.alias", ") return top # ---# def mul(self, x: float): \"\"\"", "catalog: self.catalog[elem] = catalog[elem] # ---# def __getitem__(self, index): if", "---# def avg(self): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'avg'", "\"auto\", ax=None, **style_kwds, ): \"\"\" --------------------------------------------------------------------------- Draws the pie chart", "MIN(ord) AS ord, SUM(1 - {}) AS non_events, SUM({}) AS", "title=\"Computing the mode.\", method=\"fetchall\", ) top = None if not", "nb, [], [] while total < int(float(count / int(nbins))) *", "= \"CASE WHEN \" + \" WHEN \".join(conditions) + \"", "the vColumn using 'skewness'. Returns ------- float skewness See Also", "of non-Missing elements. See Also -------- vDataFrame.aggregate : Computes the", "\"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'median'. Parameters ---------- approx:", "store usage. \"\"\" pre_comp = self.parent.__get_catalog_value__(self.alias, \"store_usage\") if pre_comp !=", ") trans += \" ELSE NULL END)\" trans = (trans,", "{}.\".format(self.alias), method=\"fetchrow\", ) if method == \"winsorize\": self.clip(lower=p_alpha, upper=p_1_alpha) elif", "self.alias, text_info ) ) return self.parent # ---# def mad(self):", "vDataFrame is modified. 
Attributes ---------- alias, str : vColumn alias.", "median : median. mode : mode (most occurent element). 0ifnull", "to use iv_woe.\".format(y) ) self.parent[y].distinct() trans = self.discretize( method=\"same_width\" if", "transformations], ) self.catalog = { \"cov\": {}, \"pearson\": {}, \"spearman\":", "self.isnum() and method in (\"same_width\", \"auto\"): if not (h) or", "\"The parameter 'name' must not be empty\" ) assert not", "\\/_/ # # # ---# class vColumn(str_sql): \"\"\" --------------------------------------------------------------------------- Python", "'{}'\".format(self.parent[by].alias, column) )[self.alias].density( bandwidth=bandwidth, kernel=kernel, nbins=nbins, xlim=(xmin, xmax), ax=ax, **updated_dict(param,", "if (h > 1) or (self.category() == \"float\"): trans =", "\"\", end_date: Union[str, datetime.datetime, datetime.date] = \"\", plot_median: bool =", "upper ZScores. threshold: float, optional Uses the Gaussian distribution to", "self, method: str = \"density\", of: str = \"\", max_cardinality:", "by[0] ) result = executeSQL( query, title=\"Computing the different aggregations.\",", "BY ord\".format( self.alias, query, ) title = \"Computing WOE &", "enumerate(columns): param = {\"color\": colors[idx % len(colors)]} ax = self.parent.search(", ") self.parent.are_namecols_in(response) response = self.parent.format_colnames(response) drop(tmp_view_name, method=\"view\") self.parent.to_db(tmp_view_name) from verticapy.learn.ensemble", "warning_message = \"Normalize doesn't work on booleans\".format(self.alias) warnings.warn(warning_message, Warning) elif", "op1, close_l = \"<=\", \"[\" elif idx == 0: op1,", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "bool = True, ): \"\"\" --------------------------------------------------------------------------- Discretizes the vColumn using", "or (method == \"categorical\") ): query = \"\"\"(SELECT {0} ||", "Keeps the topk most frequent categories and merge the other", "if isinstance(values[elem][i], decimal.Decimal): values[elem][i] = float(values[elem][i]) return tablesample(values) # ---#", "= executeSQL( query, title=\"Computing the top{} categories of {}.\".format( k", "+ 1] ... END odd : CASE ... WHEN vColumn", "user transformations. If the vDataFrame represents the entire relation, a", "FROM {}) (SELECT AVG({}) FROM vdf_table WHERE {} < {})", "the approximate density function. Increasing this parameter will increase the", "k >= 2, ParameterError( \"Parameter 'k' must be greater or", "labels: list = [], include_lowest: bool = True, right: bool", "mean_1_alpha = [ item[0] for item in executeSQL( query=query, title=\"Computing", "language governing permissions and # limitations under the License. #", "not (h) or h <= 0: if nbins <= 0:", "\"\", end_date: Union[str, datetime.datetime, datetime.date] = \"\", area: bool =", "n, [int, float])]) if n == 1: pre_comp = self.parent.__get_catalog_value__(self.alias,", "Slices the vColumn. \"\"\" check_types([(\"to_replace\", to_replace, [str]), (\"value\", value, [str])])", "- cmin == 0: warning_message = \"Can not normalize {}", "trigonometric inverse sine atan : trigonometric inverse tangent cbrt :", "max_cardinality: int = 6, nbins: int = 0, h: float", "November 1993 the 3rd. 
area: bool, optional If set to", "on data stored in Vertica, taking advantage Vertica’s # speed", "= gen_colors()[0] if not (\"legend\" in kwargs): kwargs[\"legend\"] = True", "to pass to the Matplotlib functions. Returns ------- ax Matplotlib", "h will be picked or computed) nbins: int, optional Number", "\\/\\ \\ \\ \\ \\____ \\ \\ \\_\\ \\ \\", "the head of the vColumn. Parameters ---------- limit: int, optional", "{} AS ord, {}::int AS {} FROM {}\".format( trans, self.alias,", "method.lower() self.parent.are_namecols_in(by) by = self.parent.format_colnames(by) nullifzero, n = 1, len(by)", "be numerical for Normalization\") return self.parent # ---# def nsmallest(self,", "ts: str TS (Time Series) vColumn to use to order", "q quantile (ex: 50% for the median) prod : product", "the vColumn. Parameters ---------- limit: int, optional Number of elements", "k: int = 6, new_category: str = \"Others\", RFmodel_params: dict", "OVER (ORDER BY {}) AS _verticapy_row_nb_ FROM {} WHERE {}", "to 1\") where = \" WHERE {} IS NOT NULL", "\"avg\", \"median\", \"ffill\", \"pad\", \"bfill\", \"backfill\", ], ), (\"expr\", expr,", "\" result = executeSQL( \"SELECT {} FROM (SELECT {}, COUNT(*)", "check_types([(\"n\", n, [int, float])]) return self.apply(func=\"ROUND({}, {})\".format(\"{}\", n)) # ---#", "func, [str]), (\"copy_name\", copy_name, [str])]) try: try: ctype = get_data_types(", "h, ax=ax, **style_kwds) # ---# def iloc(self, limit: int =", "if pie_type == \"rose\" else False if of: self.parent.are_namecols_in(of) of", "tan : trigonometric tangent tanh : hyperbolic tangent x: int/float,", "{} LIMIT 1\".format( self.alias, cast, self.parent.__genSQL__(), self.parent.__get_last_order_by__(), index, ) return", "field: str): \"\"\" --------------------------------------------------------------------------- Extracts a specific TS field from", "int = 0, h: float = 0, ax=None, **style_kwds, ):", "int])]) assert (lower != None) or (upper != None), ParameterError(", "frequent elements will be gathered together to create a new", "method. new_category: str, optional The name of the merging category", "vDataFrame[].dropna : Drops the vColumn missing values. \"\"\" if isinstance(by,", "name = ( '\"{}{}\"'.format(prefix, k) if (use_numbers_as_suffix) else '\"{}{}\"'.format( prefix,", "setattr(self.parent, name, new_vColumn) setattr(self.parent, name.replace('\"', \"\"), new_vColumn) self.parent._VERTICAPY_VARIABLES_[\"columns\"] += [name]", "= ( \"WHEN {} > {} THEN {} \".format(\"{}\", upper,", "\"NULLIFZERO\" if (nullifzero) else \"\", stddev ) else: final_transformation =", "\"nbins\": 100}) model.set_params(RFmodel_params) parameters = model.get_params() try: model.fit(tmp_view_name, [self.alias], response)", "\"\"\" if isinstance(method, str): method = method.lower() check_types( [ (\"method\",", "\"WHERE _verticapy_row_nb_ IN ({})\".format( \", \".join([\"1\"] + nth_elems + [str(count)])", "AS 'index', COUNT({1}) AS count, 100 * COUNT({1}) / {2}", "= self.parent.shape()[0] self.catalog[\"percent\"] = 100 self.parent.__add_to_history__( \"[Label Encoding]: Label Encoding", "during the computation. Returns ------- tablesample An object containing the", "Value to use to impute the vColumn. 
method: dict, optional", "\"\"\" --------------------------------------------------------------------------- Looks if some specific records are in the", "else: xmin, xmax = xlim custom_lines = [] columns =", "\"\"\" --------------------------------------------------------------------------- Fills missing elements in the vColumn with a", "> {} THEN {} ELSE {} END)\".format( \"{}\", p_alpha, mean_alpha,", ": trigonometric cosine cosh : hyperbolic cosine cot : trigonometric", "of another number round : rounds a value to a", "Warning) return self elif (n == 1) and (self.parent[by[0]].nunique() <", "_| _ /~\\ _ |. # |_)\\/ |_)(_|(_|| \\_/|_|(_||| #", "return to_tablesample(query, title=title) # ---# def numh(self, method: str =", "LIMIT 1\".format( alpha, self.alias, alpha, self.alias, self.parent.__genSQL__() ) p_alpha, p_1_alpha", "method == \"minmax\": self.catalog[elem] = (sauv[elem] - sauv[\"min\"]) / (", "model.set_params({\"n_estimators\": 20, \"max_depth\": 8, \"nbins\": 100}) model.set_params(RFmodel_params) parameters = model.get_params()", "ELSE '{}' END)\".format( bin_spatial_to_str(self.category()), \", \".join( [ \"'{}'\".format(str(elem).replace(\"'\", \"''\")) for", "def clip(self, lower=None, upper=None): \"\"\" --------------------------------------------------------------------------- Clips the vColumn by", "def apply_fun(self, func: str, x: float = 2): \"\"\" ---------------------------------------------------------------------------", "return self.transformations[-1][1].lower() dtype = ctype # ---# def date_part(self, field:", "({} ORDER BY {}))\".format( \"{}\", \"{}\", partition_by, order_by_ts ) if", "Extracts the regular expression in each record of the vColumn.", "an input value. vDataFrame[].str_slice : Slices the vColumn. \"\"\" check_types([(\"pat\",", "in (\"bool\", \"boolean\") # ---# def isdate(self): \"\"\" --------------------------------------------------------------------------- Returns", "self.ctype() tail.name = self.alias return tail # ---# def isbool(self):", "self.parent.are_namecols_in(by) by = self.parent.format_colnames(by) from verticapy.plot import gen_colors from matplotlib.lines", "the following: (function, type, category) parent: vDataFrame, optional Parent of", "else: if not (\"color\" in kwargs): from verticapy.plot import gen_colors", "optional vColumn to use to partition the TS. start_date: str", "'std' (Standard Deviation). Returns ------- float std See Also --------", "vDataFrame.fill_outliers : Fills the outliers in the vColumn. vDataFrame.outliers :", "quantiles of {}.\".format(self.alias), method=\"fetchrow\", ) if method == \"winsorize\": self.clip(lower=p_alpha,", "tail of the vColumn. \"\"\" check_types([(\"limit\", limit, [int, float]), (\"offset\",", "--------------------------------------------------------------------------- Adds a copy vColumn to the parent vDataFrame. Parameters", "to use to impute the vColumn. 
method: dict, optional Method", "= to_tablesample( \"SELECT {} AS {} FROM {}{} LIMIT {}", "decimal places sign : arithmetic sign sin : trigonometric sine", "True, alpha: float = 0.05 ): \"\"\" --------------------------------------------------------------------------- Drops outliers", "method == \"mean\": query = \"WITH vdf_table AS (SELECT *", "elif method == \"null\": self.apply( func=\"(CASE WHEN ({} BETWEEN {}", "verticapy.options[\"print_info\"]: print(\"The mean encoding was successfully done.\") return self.parent #", ") if of: self.parent.are_namecols_in(of) of = self.parent.format_colnames(of) from verticapy.plot import", "input method. Parameters ---------- method: str, optional The describe method.", "mad See Also -------- vDataFrame.aggregate : Computes the vDataFrame input", "int, float], ), (\"plot_median\", plot_median, [bool]), ] ) self.parent.are_namecols_in(ts) ts", ") if verticapy.options[\"print_info\"]: print(\"The mean encoding was successfully done.\") return", "from verticapy.plot import spider as spider_plot return spider_plot( self.parent, columns,", "vDataFrame.expected_store_usage : Returns the vDataFrame expected store usage. \"\"\" pre_comp", "work if it is part of the DB version you", "function. For more information, see: https://geopandas.readthedocs.io/en/latest/docs/reference/api/ geopandas.GeoDataFrame.plot.html Returns ------- ax", "------- vDataFrame self.parent See Also -------- vDataFrame[].apply : Applies a", "optional The interval size to convert to use to convert", "\"numerical\")) or not (is_numeric) or (method == \"categorical\") ): query", "processing, VerticaPy brings the logic to the data. # #", "float])]) query = \"SELECT * FROM {} WHERE {} IS", "drop(tmp_view_name, method=\"view\") drop(tmp_model_name, method=\"model\") raise drop(tmp_view_name, method=\"view\") drop(tmp_model_name, method=\"model\") result", "False, the function's performance can drastically decrease. Returns ------- float/str", "self.isnum(): if method == \"zscore\": if n == 0: nullifzero", "Apache License, Version 2.0 (the \"License\"); # You may not", "\"SELECT {}, {}({}) FROM {} GROUP BY {};\".format( by[0], fun,", "\" FROM vdf_table WHERE {0} IS NOT NULL GROUP BY", "the result. For more information, see utilities.tablesample. See Also --------", "absolute deviation approx_unique : approximative cardinality count : number of", "\"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'mad' (median absolute deviation).", "tangent tanh : hyperbolic tangent x: int/float, optional If the", "(k < 1) else \"LIMIT {}\".format(k) dropna = \" WHERE", "\"\"\" --------------------------------------------------------------------------- Draws the spider plot of the input vColumn", ") donut = True if pie_type == \"donut\" else False", "computed) nbins: int, optional Number of nbins. If empty, an", "x: int/float, optional If the function has two arguments (example,", "= (14, 10) return self.parent[columns].to_geopandas(self.alias).plot(*args, **kwargs) # ---# def get_dummies(", "case, the parameter 'numcol' must be defined. numerical : Uses", "the next element (Constant Interpolation). ffill : Propagation of the", "= [] prefix = ( self.alias.replace('\"', \"\") + prefix_sep.replace('\"', \"_\")", "vDataFrame by using an advanced analytical function on a specific", "information, see utilities.tablesample. 
See Also -------- vDataFrame[].head : Returns the", "\"numerical\", \"categorical\", \"cat_stats\"]), (\"max_cardinality\", max_cardinality, [int, float]), (\"numcol\", numcol, [str]),", "optional If set to True, the approximate quantile is returned.", "return self.apply(func=\"ABS({})\") # ---# def add(self, x: float): \"\"\" ---------------------------------------------------------------------------", "self.parent one_hot_encode = get_dummies # ---# def head(self, limit: int", "NOT NULL ORDER BY {} ASC LIMIT {}\".format( self.parent.__genSQL__(), self.alias,", "IN ({})\".format( \", \".join([\"1\"] + nth_elems + [str(count)]) ) query", "'{0}' AS 'index', COUNT({1}) AS count, 100 * COUNT({1}) /", "= index.stop index_start = index.start if not (isinstance(index_start, int)): index_start", "result if elem[1] != None ] ), ) stddev =", "vColumn 'of' (ex: 50% to get the median). It can", "nbins using Random Forest.\", method=\"fetchall\", ) result = [elem[0] for", "auto : Mean for the numerical and Mode for the", "Also -------- vDataFrame[].isbool : Returns True if the vColumn is", "self, ts, by, start_date, end_date, area, step, ax=ax, **style_kwds, )", "method=\"fetchall\", ) ] if mean_alpha == None: mean_alpha = \"NULL\"", "\"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using multiple statistical aggregations: min,", "drawing the box plot. The other categories will be filtered.", "/ ({})\".format(\"{}\", med, mad), \"float\", \"float\", ) ] else: warning_message", "/ / # \\/ / / # / / #", "threshold to use to determine if the vColumn will be", "check: self.parent.are_namecols_in(column) column = self.parent.format_colnames(column) columns += [column] if not", "vDataFrame input aggregations. \"\"\" return self.aggregate([\"mad\"]).values[self.alias][0] # ---# def max(self):", "Robust Z-Score (median and mad). (x - median) / (1.4826", "vColumn. Parameters ---------- func: str Function to use to transform", "\"(CASE {}{}ELSE {} END)\".format(lower_when, upper_when, \"{}\") self.apply(func=func) return self.parent #", "start: bool = True): \"\"\" --------------------------------------------------------------------------- Slices and transforms the", "self.isnum() or self.isdate(), ParameterError( \"numh is only available on type", "# ---# def skewness(self): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using", "self.apply( func=\"(CASE WHEN {} < {} THEN {} WHEN {}", "---# def isin(self, val: list, *args): \"\"\" --------------------------------------------------------------------------- Looks if", "distinct = self.topk(k).values[\"index\"] trans = ( \"(CASE WHEN {} IN", "\"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using an input 'quantile'. Parameters", "the vColumn is numerical. 
\"\"\" return self.category() == \"date\" #", "law or agreed to in writing, software # distributed under", "or ( self.isnum() and method == \"smart\" ): n =", "some specific records are in the vColumn and it returns", "BY {})\".format( self.alias, \", \".join(by) ), ) else: cmax, cmin", "fun = \"MEDIAN\" if (method == \"median\") else \"AVG\" if", "donut = True if pie_type == \"donut\" else False rose", "(\"{}\", self.ctype(), self.category()) ] self.parent[copy_name].transformations += [(func, ctype, category)] self.parent[copy_name].catalog", "\"[Label Encoding]: Label Encoding was applied to the vColumn {}", "query = \"SELECT COUNT({}) AS NAs, MIN({}) AS min, APPROXIMATE_PERCENTILE({}", "(pt_non_events - pt_events) * ZEROIFNULL(LN(pt_non_events / NULLIFZERO(pt_events))) END AS iv", "stored in the vDataFrame history. Returns ------- vDataFrame self.parent See", "- 1): first_elem, second_elem = breaks[idx], breaks[idx + 1] if", "\"SELECT {} AS {} FROM {} WHERE {} IS NOT", "the transformation instead of the vDataFrame parent and do not", "\"minmax\": self.catalog[\"min\"] = 0 self.catalog[\"max\"] = 1 self.parent.__add_to_history__( \"[Normalize]: The", "with the method '{}'.\".format( self.alias, method ) ) else: raise", "self.alias, self.parent.shape()[0], self.parent.__genSQL__(), dropna, self.alias, topk, ) result = executeSQL(", "nbins: int, optional Maximum number of points to use to", "\" UNION ALL \".join(query) ) title = \"Describes the statics", "this issue.\" ) new_vColumn = vColumn( name, parent=self.parent, transformations=[item for", "were \" if len(all_new_features) > 1 else \" was \"", "\\ /\\ \"-.\\ \\ # \\ \\ \\'/ \\ \\", "= self.parent.shape()[0] tail.offset = offset tail.dtype[self.alias] = self.ctype() tail.name =", "{}, MIN(ord) AS ord, SUM(1 - {}) AS non_events, SUM({})", "(1, None), ValueError( \"vColumn doesn't allow slicing having steps different", "= [ ( \"({} - {}) / {}({})\".format( \"{}\", avg,", "assert len(breaks) == len(labels) + 1 or not (labels), ParameterError(", "\"numh\") if pre_comp != \"VERTICAPY_NOT_PRECOMPUTED\": return pre_comp assert self.isnum() or", "str, optional vColumn to use to partition the TS. start_date:", "return. Returns ------- tablesample An object containing the result. For", "self.parent.__genSQL__() ), print_time_sql=False, ) except: avg, stddev = ( \"AVG({})", "False, n: int = 1): \"\"\" --------------------------------------------------------------------------- Returns the nth", "sequentialize # data transformation operations, and offers beautiful graphical options.", "\", \".join(expr) + \", {})\".format(len(distinct_elements)) self.transformations += [(expr, \"int\", \"int\")]", "n = 1 if drop_first else 0 for k in", "print(\"Nothing was filled.\") self.transformations = [elem for elem in copy_trans]", "from verticapy.learn.neighbors import KernelDensity schema = verticapy.options[\"temp_schema\"] if not (schema):", "method = method.lower() check_types( [ (\"method\", method, [\"winsorize\", \"null\", \"mean\"]),", "of the dummies. drop_first: bool, optional Drops the first dummy", "Method to use to fill the vColumn outliers. mean :", "1\".format( avg, stddev, self.parent.__genSQL__() ), print_time_sql=False, ) except: avg, stddev", "vColumn_025, vColumn_075, vColumn_max = result sturges = max( float(vColumn_max -", "\"\", stddev ), \"float\", \"float\", ) ] elif method ==", "variable. Parameters ---------- y: str Response vColumn. 
nbins: int, optional", "generation will be slower if the vDataFrame has been transformed", "the current vColumn and creating a copy with the specified", "use_threshold: result = self.aggregate(func=[\"std\", \"avg\"]).transpose().values self.parent.filter( \"ABS({} - {}) /", "vDataFrame[].str_slice : Slices the vColumn. \"\"\" check_types([(\"pat\", pat, [str])]) return", "for category in cat: tmp_query = \"\"\"SELECT '{0}' AS 'index',", "= float(top) self.parent.__update_catalog__( {\"index\": [\"top{}\".format(n)], self.alias: [top]} ) return top", "fill the vColumn outliers. mean : Replaces the upper and", "mad). (x - median) / (1.4826 * mad) minmax :", "invalid. max_cardinality: int/tuple, optional Maximum number of distinct elements for", "== 1: pre_comp = self.parent.__get_catalog_value__(self.alias, \"top\") if pre_comp != \"VERTICAPY_NOT_PRECOMPUTED\":", "sauv[\"approx_50%\"]) / ( 1.4826 * sauv[\"mad\"] ) elif method ==", ": Draws the histogram of the vColumn based on an", "warning_message = \"Can not normalize {} using a Z-Score -", "def date_part(self, field: str): \"\"\" --------------------------------------------------------------------------- Extracts a specific TS", "[elem[0] for elem in result] elif self.isnum() and method in", ": natural logarithm log : logarithm log10 : base 10", "range(len(distinct_elements) - n): name = ( '\"{}{}\"'.format(prefix, k) if (use_numbers_as_suffix)", "- n): name = ( '\"{}{}\"'.format(prefix, k) if (use_numbers_as_suffix) else", "index_start if limit <= 0: limit = 0 limit =", "in transformations], ) self.catalog = { \"cov\": {}, \"pearson\": {},", "to True, NULL values will not be considered during the", "vDataFrame Global Outliers. \"\"\" if isinstance(by, str): by = [by]", "str): \"\"\" --------------------------------------------------------------------------- Encodes the vColumn using the average of", "log : logarithm log10 : base 10 logarithm mod :", "of the next element (Constant Interpolation). ffill : Propagation of", "AS max FROM vdf_table\"\"\".format( category, self.alias, self.parent.shape()[0], numcol, cast, )", "1.4826 if mad != 0: if return_trans: return \"({} -", "(median and mad). (x - median) / (1.4826 * mad)", "function to all the vColumns. vDataFrame.eval : Evaluates a customized", "= \"NULL\" self.apply( func=\"(CASE WHEN {} < {} THEN {}", "\"varchar\", \"text\") if return_enum_trans: return trans else: self.transformations += [trans]", "\"log10\", \"mod\", \"pow\", \"round\", \"sign\", \"sin\", \"sinh\", \"sqrt\", \"tan\", \"tanh\",", "True if pie_type == \"rose\" else False if of: self.parent.are_namecols_in(of)", "input element. Parameters ---------- x: float Input number. Returns -------", "optional Input Start Date. For example, time = '03-11-1993' will", "Computes the vDataFrame input aggregations. \"\"\" return self.quantile(0.5, approx=approx) #", "in sauv: self.catalog[\"count\"] = sauv[\"count\"] self.catalog[\"percent\"] = ( 100 *", "(Average). Returns ------- float average See Also -------- vDataFrame.aggregate :", "value. \"\"\" check_types([(\"start\", start, [int, float]), (\"step\", step, [int, float])])", "= self.avg() elif fun == \"MEDIAN\": val = self.median() new_column", "/ # \\_______/ / / # ______ / / #", "type is date like). The vColumn will be transformed. 
Parameters", "\"count\", \"mean\", \"std\", \"min\", \"approx_25%\", \"approx_50%\", \"approx_75%\", \"max\", ] if", "\"avg\"]).transpose().values self.parent.filter( \"ABS({} - {}) / {} < {}\".format( self.alias,", "AS std, MIN({3}{4}) AS min, APPROXIMATE_PERCENTILE ({3}{4} USING PARAMETERS percentile", "{} LIMIT 1\".format( avg, stddev, self.parent.__genSQL__() ), print_time_sql=False, ) except:", "drop(tmp_model_name, method=\"model\") result = [self.min()] + result + [self.max()] elif", "else: query = \"SELECT PERCENTILE_CONT({}) WITHIN GROUP (ORDER BY {})", "else '\"{}{}\"'.format( prefix, str(distinct_elements[k]).replace('\"', \"_\") ) ) name = (", "self, ts: str, by: str = \"\", start_date: Union[str, datetime.datetime,", "be computed. h: float, optional Interval width of the bar.", "\"\"), new_vColumn) self.parent._VERTICAPY_VARIABLES_[\"columns\"] += [name] all_new_features += [name] conj =", "# ---# def nlargest(self, n: int = 10): \"\"\" ---------------------------------------------------------------------------", "booleans\".format(self.alias) warnings.warn(warning_message, Warning) elif self.isnum(): if method == \"zscore\": if", "MIN !\".format( self.alias ) warnings.warn(warning_message, Warning) return self elif n", "Q3, MAX({}) AS max FROM {}\".format( self.alias, self.alias, self.alias, self.alias,", "Aggregates the vColumn using 'count' (Number of non-Missing elements). Returns", "as e: self.transformations = [elem for elem in copy_trans] raise", "using 'product'. Returns ------- float product See Also -------- vDataFrame.aggregate", "{}, COUNT(*) AS _verticapy_cnt_ FROM {}{}GROUP BY {} ORDER BY", "check_types( [ (\"length\", length, [int, float]), (\"unit\", unit, [str]), (\"start\",", "1 or not (labels), ParameterError( \"Length of parameter breaks must", "a Z-Score - The Standard Deviation is null !\".format( self.alias", "\"\"\" --------------------------------------------------------------------------- Returns the tail of the vColumn. Parameters ----------", "set to True, draw a Step Plot. ax: Matplotlib axes", "to the vDataFrame by using an advanced analytical function on", "vColumn. \"\"\" check_types([(\"pat\", pat, [str])]) return self.apply( func=\"REGEXP_SUBSTR({}, '{}')\".format(\"{}\", pat.replace(\"'\",", "set to True, the information will be stored in the", "# # ---# def aad(self): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn", "(\"bool\", \"boolean\") # ---# def isdate(self): \"\"\" --------------------------------------------------------------------------- Returns True", "the vDataFrame.\".format( self.alias, name ) ) return self.parent # ---#", "dropna, self.alias, topk, ) result = executeSQL( query, title=\"Computing the", "method.lower() if self.isnum() and method == \"smart\": schema = verticapy.options[\"temp_schema\"]", "vColumn. vDataFrame[].get_dummies : Encodes the vColumn with One-Hot Encoding. vDataFrame[].mean_encode", "float]), (\"h\", h, [int, float]), ] ) if of: self.parent.are_namecols_in(of)", "**style_kwds, ) # ---# def rename(self, new_name: str): \"\"\" ---------------------------------------------------------------------------", "to the vColumn. 
Parameters ---------- x: float If the vColumn", "= self.aggregate([\"avg\", \"std\"]).values[self.alias] if stddev == 0: warning_message = \"Can", "discretize( self, method: str = \"auto\", h: float = 0,", "\"robust_zscore\": self.catalog[\"median\"] = 0 self.catalog[\"mad\"] = 1 / 1.4826 elif", "str = \"auto\", h: float = 0, nbins: int =", "q, start_date, end_date, plot_median, ax=ax, **style_kwds, ) # ---# def", "or implied. # See the License for the specific language", "on. **style_kwds Any optional parameter to pass to the Matplotlib", "\"kendall\": {}, \"cramer\": {}, \"biserial\": {}, \"regr_avgx\": {}, \"regr_avgy\": {},", "(must be > 1) k: int, optional The integer k", "logistic : Logistic kernel. sigmoid : Sigmoid kernel. silverman :", "the vColumn by keeping only the input number of digits", "histogram nbins using Random Forest.\", method=\"fetchall\", ) result = [elem[0]", "= executeSQL( \"SELECT {} FROM (SELECT {}, COUNT(*) AS _verticapy_cnt_", "gen_cmap kwargs[\"cmap\"] = gen_cmap()[0] else: if not (\"color\" in kwargs):", "# # Methods # # ---# def aad(self): \"\"\" ---------------------------------------------------------------------------", "aggregation. vColumns will memorize the already computed aggregations to gain", "'k' must be greater or equals to 2 in case", "or self.isbool(): all_new_features = [] prefix = ( self.alias.replace('\"', \"\")", "self.alias, \", \".join(by) ), ) else: avg, stddev = (", "def distinct(self, **kwargs): \"\"\" --------------------------------------------------------------------------- Returns the distinct categories of", "--------------------------------------------------------------------------- Subtracts the input element from the vColumn. Parameters ----------", ") return self.parent # ---# def dropna(self): \"\"\" --------------------------------------------------------------------------- Filters", "usage. \"\"\" import sys total = ( sys.getsizeof(self) + sys.getsizeof(self.alias)", ">= 2, ParameterError( \"Parameter 'nbins' must be greater or equals", "method, [str]), (\"of\", of, [str]), (\"max_cardinality\", max_cardinality, [list]), (\"h\", h,", "/ 1.4826 elif method == \"zscore\": self.catalog[\"mean\"] = 0 self.catalog[\"std\"]", "Adds the input element to the vColumn. Parameters ---------- x:", "be a cutomized aggregation (ex: AVG(column1) + 5). of: str,", "-------- vDataFrame.iv_woe : Computes the Information Value (IV) Table. \"\"\"", "and a limit). Parameters ---------- limit: int, optional Number of", "the vColumns to the input type. \"\"\" check_types([(\"dtype\", dtype, [str])])", "---------- *args / **kwargs Any optional parameter to pass to", "function to the vColumn. Parameters ---------- func: str, Function in", "record is greater than the threshold, it will be considered", "vColumn {} has no mode (only missing values).\\nNothing was filled.\".format(", "method.lower() check_types( [ (\"method\", method, [\"winsorize\", \"null\", \"mean\"]), (\"alpha\", alpha,", "science life cycle, uses a ‘pipeline’ mechanism to sequentialize #", "be composed of two flower brackets {}. For example to", "root tan : trigonometric tangent tanh : hyperbolic tangent x:", "Computes regular width bins. smart : Uses the Random Forest", "information will be stored in the vDataFrame history. 
Returns -------", "warnings, datetime from collections.abc import Iterable from typing import Union", "self.category() == \"int\": h = int(max(math.floor(h), 1)) floor_end = -1", "= self.transformations + [(expr, \"bool\", \"int\")] new_vColumn = vColumn( name,", "= [distinct_count] + result index = [ \"unique\", \"count\", \"mean\",", "= self.numh() else: h = (self.max() - self.min()) * 1.01", "use iv_woe.\".format(y) ) self.parent[y].distinct() trans = self.discretize( method=\"same_width\" if self.isnum()", "= [ ( \"({} - {}) / ({})\".format(\"{}\", med, mad),", "- {}) / ({})\".format(\"{}\", med, mad), \"float\", \"float\", ) ]", "\"'{}'\".format(str(elem).replace(\"'\", \"''\")) for elem in distinct ] ), bin_spatial_to_str(self.category()), new_category.replace(\"'\",", "most occurent elements to return. dropna: bool, optional If set", "result = to_tablesample(query, title=title) result.values[\"index\"] += [\"total\"] result.values[\"non_events\"] += [sum(result[\"non_events\"])]", "\"\"\" if \"agg\" not in kwargs: query = \"SELECT {}", "numerical, False otherwise. Returns ------- bool True if the vColumn", "optional Interval width of the vColumns 1 and 2 bars.", ") ) # ---# def str_slice(self, start: int, step: int):", "'approx_75%', APPROXIMATE_PERCENTILE ({3}{4} USING PARAMETERS percentile = 0.9) AS 'approx_90%',", "APPROXIMATE_PERCENTILE ({3}{4} USING PARAMETERS percentile = 0.25) AS 'approx_25%', APPROXIMATE_PERCENTILE", "if len(cat) == 1: lp, rp = \"(\", \")\" else:", "aggregations used are the median and two input quantiles. Parameters", "input aggregations. \"\"\" check_types( [ (\"method\", method, [\"auto\", \"numerical\", \"categorical\",", "self.transformations += final_transformation sauv = {} for elem in self.catalog:", "in each of the vColumn records. The vColumn will be", "other categories will be filtered. ax: Matplotlib axes object, optional", "# ---# def slice(self, length: int, unit: str = \"second\",", "and 2 bars. It is only valid if the vColumns", "n): trans += \"WHEN {} BETWEEN {} AND {} THEN", "that relation. vColumns simplify several processes with its abstractions. Parameters", "Iterable)): cat_priority = [cat_priority] check_types( [ (\"by\", by, [str]), (\"max_cardinality\",", "= max(0, self.parent.shape()[0] - limit) title = \"Reads {}.\".format(self.alias) tail", "method: str, optional The method to use to discretize the", "median. mode : mode (most occurent element). 0ifnull : 0", "ParameterError( \"Parameter 'k' must be greater or equals to 2", "average median : median min : minimum mode : most", "self, val=None, method: str = \"auto\", expr: str = \"\",", "return self.count() > 0 # ---# def __repr__(self): return self.head(limit=verticapy.options[\"max_rows\"]).__repr__()", "[(func, ctype, category)] self.parent.__update_catalog__(erase=True, columns=[self.alias]) self.parent.__add_to_history__( \"[Apply]: The vColumn '{}'", "drastically decrease. Returns ------- float quantile (or approximate quantile). See", "query = \"SELECT PERCENTILE_CONT({}) WITHIN GROUP (ORDER BY {}) OVER", "= (trans, \"varchar\", \"text\") if return_enum_trans: return trans else: self.transformations", "-> {}'.\".format( copy_name.replace('\"', \"\"), func.replace(\"{}\", \"x\"), ) ) else: for", "all of the above. The idea is simple: instead of", "extract. 
It must be one of the following: CENTURY /", "None)) else \" WHERE {} = '{}'\".format( bin_spatial_to_str(self.category(), self.alias), category,", "(method == \"categorical\")) or ( method == \"is_numeric\" ): self.parent.__update_catalog__({\"index\":", "\"text\", ) else: trans = (\"FLOOR({}) || ''\", \"varchar\", \"text\")", "else: for k in range(max_floor): self.transformations += [(\"{}\", self.ctype(), self.category())]", "\"\"\" --------------------------------------------------------------------------- Applies the absolute value function to the input", "the plot. gaussian : Gaussian kernel. logistic : Logistic kernel.", "\"\"), EmptyParameter( \"The parameter 'name' must not be empty\" )", "final generated SQL code. Note: Dropping a vColumn can make", "= \" LIMIT {}\".format(limit) else: limit = \"\" query =", "\\_\\ \\ \\_\\\\\"\\_\\ # \\/_/ \\/_____/ \\/_____/ \\/_____/ \\/_____/ \\/_/", "( self.isnum() and method == \"smart\" ): n = len(result)", "= ( \"PARTITION BY {}\".format( \", \".join([quote_ident(column) for column in", "conditions += [ f\"'{first_elem}' {op1} {column} AND {column} {op2} '{second_elem}'", "self.parent.format_colnames(by) from verticapy.plot import ts_plot return ts_plot( self, ts, by,", "be considered as categorical. numcol: str, optional Numerical vColumn to", "self.catalog[\"count\"] = sauv[\"count\"] self.catalog[\"percent\"] = ( 100 * sauv[\"count\"] /", "val = [val] val += list(args) check_types([(\"val\", val, [list])]) val", "it will be considered as an outlier. use_threshold: bool, optional", "if elem[1] != None ] ), ) stddev = \"DECODE({},", "q Quantile of the vColumn 'of' (ex: 50% to get", "import * ## # # __ __ ______ ______ __", ": Computes the vDataFrame input aggregations. \"\"\" return self.aggregate([\"max\"]).values[self.alias][0] #", "(self.isnum() and method == \"same_freq\") or ( self.isnum() and method", "= 0 cmin, cmax = self.aggregate([\"min\", \"max\"]).values[self.alias] if cmax -", "another number round : rounds a value to a specified", "import gen_colors kwargs[\"color\"] = gen_colors()[0] if not (\"legend\" in kwargs):", ") count, vColumn_min, vColumn_025, vColumn_075, vColumn_max = result sturges =", "+= [(func, ctype, category)] self.parent[copy_name].catalog = self.catalog self.parent.__add_to_history__( \"[Apply]: The", "vColumn based on an aggregation. \"\"\" check_types( [ (\"by\", by,", "method: str = \"auto\", max_cardinality: int = 6, numcol: str", "happened during the filling.\".format(e)) if total > 0: try: if", "bin_spatial_to_str(self.category(), self.alias), self.parent.__genSQL__(), ), title=\"Computing the Store Usage of the", "max_cardinality, [list]), (\"h\", h, [list, float, int]), ] ) if", "= ctype # ---# def date_part(self, field: str): \"\"\" ---------------------------------------------------------------------------", "function to the input vColumn. \"\"\" check_types( [ (\"breaks\", breaks,", "\"zscore\", by: list = [], return_trans: bool = False ):", "{} {} smallest elements.\".format(n, self.alias) return to_tablesample(query, title=title) # ---#", "with One-Hot Encoding. vDataFrame[].mean_encode : Encodes the vColumn using the", "filtered. ax: Matplotlib axes object, optional The axes to plot", "List of the different transformations. 
Each transformation must be similar", "check_types([(\"to_replace\", to_replace, [str]), (\"value\", value, [str])]) return self.apply( func=\"REGEXP_REPLACE({}, '{}',", "\"func\", func, [ \"abs\", \"acos\", \"asin\", \"atan\", \"cbrt\", \"ceil\", \"cos\",", ": Computes regular width bins. smart : Uses the Random", "approx: bool, optional If set to True, the approximate quantile", "1 / 1.4826 elif method == \"zscore\": self.catalog[\"mean\"] = 0", "new_category: str = \"Others\", RFmodel_params: dict = {}, response: str", "method=\"fetchall\", ) for idx, elem in enumerate(result): result[idx][0] = (", "= { \"cov\": {}, \"pearson\": {}, \"spearman\": {}, \"spearmand\": {},", "(sauv[elem] - sauv[\"approx_50%\"]) / ( 1.4826 * sauv[\"mad\"] ) elif", "\"\"\" check_types([(\"pat\", pat, [str])]) return self.apply( func=\"REGEXP_COUNT({}, '{}')\".format(\"{}\", pat.replace(\"'\", \"''\"))", "with user defined Encoding. vDataFrame[].get_dummies : Encodes the vColumn with", "as lower bound quantile(alpha) and as upper bound quantile(1-alpha) if", "= \"SELECT {}, MIN(ord) AS ord, SUM(1 - {}) AS", "\\ / / /\\ ___\\ /\\ __ \\ /\\ \\", "-------- vDataFrame.topk : Returns the vColumn most occurent elements. \"\"\"", "str : vColumn alias. catalog, dict : Catalog of pre-computed", "{}) AS _verticapy_row_nb_ FROM {} WHERE {} IS NOT NULL)", "be sliced using the floor of the slicing instead of", "in range(max_floor): self.transformations += [(\"{}\", self.ctype(), self.category())] self.transformations += [(new_column,", "Also -------- vDataFrame[].decode : Encodes the vColumn with a user", "of the vColumn based on an aggregation. \"\"\" check_types( [", "be used as categorical (No h will be picked or", "{}, COUNT(*) AS _verticapy_cnt_, 100 * COUNT(*) / {} AS", "float])]) return self.apply(func=\"ROUND({}, {})\".format(\"{}\", n)) # ---# def sem(self): \"\"\"", "result = result.values[self.alias] elif (method == \"cat_stats\") and (numcol !=", "of the different transformations. Each transformation must be similar to", "1) k: int, optional The integer k of the 'topk'", "conj = \"s were \" if total > 1 else", "picked or computed) ax: Matplotlib axes object, optional The axes", "split_value FROM (SELECT split_value, MAX(weighted_information_gain) FROM ({}) VERTICAPY_SUBTABLE WHERE split_value", "percents. Parameters ---------- k: int, optional Number of most occurent", "a user-specified rule. Parameters ---------- val: int/float/str, optional Value to", "( 1.4826 * sauv[\"mad\"] ) elif method == \"zscore\": self.catalog[elem]", "query_result = executeSQL( query=query, title=\"Computing the distinct categories of {}.\".format(self.alias),", "ax=ax, **style_kwds, ) # ---# def std(self): \"\"\" --------------------------------------------------------------------------- Aggregates", "\"NULL\" if mean_1_alpha == None: mean_alpha = \"NULL\" self.apply( func=\"(CASE", "the upper bound itself. Parameters ---------- lower: float, optional Lower", "method '{}'.\".format( self.alias, method ) ) else: raise TypeError(\"The vColumn", "- {})\".format( \"{}\", cmin, \"NULLIFZERO\" if (nullifzero) else \"\", cmax,", "n: int = 10): \"\"\" --------------------------------------------------------------------------- Returns the n smallest", "std See Also -------- vDataFrame.aggregate : Computes the vDataFrame input", "trigonometric sine sinh : hyperbolic sine sqrt : arithmetic square", "the input vColumns. \"\"\" if isinstance(cat_priority, str) or not (isinstance(cat_priority,", "a user-defined encoding. 
Parameters ---------- argv: object Any amount of", "to the input type. \"\"\" check_types([(\"dtype\", dtype, [str])]) try: query", "ax.legend( custom_lines, columns, title=by, loc=\"center left\", bbox_to_anchor=[1, 0.5], ) ax.set_xlabel(self.alias)", "if isinstance(by, str): by = [by] check_types( [ (\"method\", method,", "self.parent.format_colnames(of) from verticapy.plot import pie return pie( self, method, of,", "from the different categories to [0, n - 1] (n", "the different categories to [0, n - 1] (n being", "self.apply(func=\"{} * ({})\".format(\"{}\", x)) # ---# def nlargest(self, n: int", "result], \"percent\": [float(round(item[2], 3)) for item in result], } return", "Forest model parameters used to compute the best splits when", "[ (\"{}\", self.ctype(), self.category()) ] self.parent[copy_name].transformations += [(func, ctype, category)]", "int/float/str, optional Value to use to impute the vColumn. method:", "h, \"{}\", h, h, h, floor_end ), \"varchar\", \"text\", )", "split_value IS NOT NULL GROUP BY 1 ORDER BY 2", "Uses the threshold instead of the 'alpha' parameter. alpha: float,", "function on a specific vColumn. \"\"\" return self.parent.aggregate(func=func, columns=[self.alias]).transpose() agg", "quantile(1-alpha) will be dropped. Returns ------- vDataFrame self.parent See Also", "cardinality). Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].decode :", "[str]), (\"max_cardinality\", max_cardinality, [int, float]), (\"h\", h, [int, float]), (\"pie_type\",", "self.parent.__genSQL__(), self.alias, self.alias, p_alpha, self.alias, self.alias, p_1_alpha, ) mean_alpha, mean_1_alpha", "number of elements. same_width : Computes regular width bins. smart", "float]), (\"nbins\", nbins, [int, float]), (\"h\", h, [int, float]), ]", "self.alias, \", \".join(by) ), ) if return_trans: return \"({} -", "Forest with 20 trees and a maximum depth of 10.", "a limit). Parameters ---------- limit: int, optional Number of elements", "------- vDataFrame self.parent See Also -------- vDataFrame[].slice : Slices the", "density function. Increasing this parameter will increase the precision but", "MAX = MIN !\".format( self.alias ) warnings.warn(warning_message, Warning) return self", "a response column to find the most relevant interval to", "The vColumn to use to compute the aggregation. max_cardinality: int,", "max_cardinality: int = 6, numcol: str = \"\" ): \"\"\"", "3rd. area: bool, optional If set to True, draw an", "lower, lower) if (isinstance(lower, (float, int))) else \"\" ) upper_when", "categories of the vColumn. Returns ------- list Distinct caterogies of", "to solve this issue.\" ) self.add_copy(new_name) parent = self.drop(add_history=False) parent.__add_to_history__(", ") # ---# def apply_fun(self, func: str, x: float =", "*args): \"\"\" --------------------------------------------------------------------------- Looks if some specific records are in", "/ ( sauv[\"max\"] - sauv[\"min\"] ) except: pass if method", "of decimal places sign : arithmetic sign sin : trigonometric", "will be stored in the vDataFrame history. Returns ------- vDataFrame", "val = self.avg() elif fun == \"MEDIAN\": val = self.median()", "dtype: str): \"\"\" --------------------------------------------------------------------------- Converts the vColumn to the input", "self.transformations = [elem for elem in copy_trans] for elem in", "datetime.datetime, datetime.date]), (\"end_date\", end_date, [str, datetime.datetime, datetime.date]), (\"area\", area, [bool]),", "name. 
\\u26A0 Warning : SQL code generation will be slower", "bool, optional If set to True, draw an Area Plot.", "category. In this case, the parameter 'numcol' must be defined.", "== 0: nullifzero = 0 cmin, cmax = self.aggregate([\"min\", \"max\"]).values[self.alias]", "be closed on the right. Returns ------- vDataFrame self.parent See", "__len__(self): return int(self.count()) # ---# def __nonzero__(self): return self.count() >", "the License for the specific language governing permissions and #", "8, \"nbins\": 100}) model.set_params(RFmodel_params) parameters = model.get_params() try: model.fit(tmp_view_name, [self.alias],", "mean_alpha, \"{}\", p_1_alpha, mean_1_alpha, \"{}\" ) ) return self.parent #", "information, see: https://geopandas.readthedocs.io/en/latest/docs/reference/api/ geopandas.GeoDataFrame.plot.html Returns ------- ax Matplotlib axes object", "x, [int, float])]) if self.isdate(): return self.apply(func=\"TIMESTAMPADD(SECOND, {}, {})\".format(x, \"{}\"))", "\\_/|_|(_||| # / # ____________ ______ # / __ `\\", ": hyperbolic sine sqrt : arithmetic square root tan :", "str, optional Numerical vColumn to use when the parameter method", "vDataFrame[].discretize : Discretizes the vColumn. vDataFrame[].get_dummies : Encodes the vColumn", "and 1 that represents the quantile. For example: 0.25 represents", "Number of digits to keep after the comma. Returns -------", "= {}).\".format(self.alias, y) result = to_tablesample(query, title=title) result.values[\"index\"] += [\"total\"]", "float]), (\"h\", h, [int, float]), (\"pie_type\", pie_type, [\"auto\", \"donut\", \"rose\"]),", "\"{}\\nThe vColumn {} can not be converted to {}\".format( e,", "or isinstance(val, float): category, ctype = \"float\", \"float\" elif method", "self.catalog[elem] = catalog[elem] # ---# def __getitem__(self, index): if isinstance(index,", "[(\"{}\", self.ctype(), self.category())] self.transformations += [(new_column, ctype, category)] try: sauv", "n == 1: try: result = executeSQL( \"SELECT {}, MIN({}),", "= 0 for elem in by: if len(self.parent[elem].transformations) > max_floor:", ": trigonometric inverse tangent cbrt : cube root ceil :", "ax=None, **style_kwds, ): \"\"\" --------------------------------------------------------------------------- Draws the range plot of", "input element to the vColumn. Parameters ---------- x: float If", "precision but will also increase the time of the learning", "title=title) # ---# def normalize( self, method: str = \"zscore\",", "self, method: str = \"auto\", max_cardinality: int = 6, numcol:", "aggregation. max_cardinality: int, optional Maximum number of the vColumn distinct", "The bandwidth of the kernel. kernel: str, optional The method", "(h) or h <= 0: if nbins <= 0: h", "self.category() == \"float\" else \"\" if index < 0: index", "using a mean encoding with {} as Response Column.\".format( self.alias,", "True, NULL values will not be considered during the computation.", "\"round\", \"sign\", \"sin\", \"sinh\", \"sqrt\", \"tan\", \"tanh\", ], ), (\"x\",", "Classifier otherwise. Example: Write {\"n_estimators\": 20, \"max_depth\": 10} to train", "raise QueryError( \"{}\\nError when applying the func 'x -> {}'", "column) )[self.alias].density( bandwidth=bandwidth, kernel=kernel, nbins=nbins, xlim=(xmin, xmax), ax=ax, **updated_dict(param, style_kwds,", "a copy of the vColumn. 
\"\"\" check_types([(\"new_name\", new_name, [str])]) old_name", "changing the parameter 'new_name', you'll be able to solve this", "response ) ) if verticapy.options[\"print_info\"]: print(\"The mean encoding was successfully", "input vColumns using the input method. Parameters ---------- method: str,", "/ {}({} - {})\".format( \"{}\", cmin, \"NULLIFZERO\" if (nullifzero) else", "--------------------------------------------------------------------------- Aggregates the vColumn using 'sum'. Returns ------- float sum", "\"robust_zscore\", \"minmax\"]), (\"by\", by, [list]), (\"return_trans\", return_trans, [bool]), ] )", "vColumns used in the partition. return_trans: bool, optimal If set", "elem: self.catalog[elem] = sauv[elem] elif elem == None: self.catalog[elem] =", "optional List of the different transformations. Each transformation must be", "check_types( [ ( \"func\", func, [ \"abs\", \"acos\", \"asin\", \"atan\",", "for the discretization (must be > 1) k: int, optional", "\"\"\" --------------------------------------------------------------------------- Adds the input element to the vColumn. Parameters", "be closed. If set to True, the intervals will be", "Draws the vColumn Density Plot. Parameters ---------- by: str, optional", "[int, float])]) if n == 1: pre_comp = self.parent.__get_catalog_value__(self.alias, \"top\")", "+ prefix_sep.replace('\"', \"_\") ) n = 1 if drop_first else", "parameter breaks must be equal to the length of parameter", "and machine learning features. It supports the # entire data", "self.alias, p_alpha, self.alias, self.alias, p_1_alpha, ) mean_alpha, mean_1_alpha = [", "the vColumn is boolean. vDataFrame[].isdate : Returns True if the", "{} > {})\".format( self.parent.__genSQL__(), self.alias, self.alias, p_alpha, self.alias, self.alias, p_1_alpha,", "for elem in result if elem[1] != None ] ),", "[int, float]), ] ) if of: self.parent.are_namecols_in(of) of = self.parent.format_colnames(of)", "Aggregates the vColumn using 'avg' (Average). Returns ------- float average", "USING PARAMETERS percentile = 0.25) AS 'approx_25%', APPROXIMATE_PERCENTILE ({3}{4} USING", "_verticapy_cnt_, 100 * COUNT(*) / {} AS percent FROM {}{}", "self.alias, self.transformations = ( parent, alias, [elem for elem in", "from verticapy.plot import boxplot return boxplot(self, by, h, max_cardinality, cat_priority,", "uses a ‘pipeline’ mechanism to sequentialize # data transformation operations,", "int(total) conj = \"s were \" if total > 1", "# distributed under the License is distributed on an \"AS", "be transformed. Parameters ---------- field: str The field to extract.", "object, optional The axes to plot on. **style_kwds Any optional", "20, \"max_depth\": 8, \"nbins\": 100}) model.set_params(RFmodel_params) parameters = model.get_params() try:", "transform the vColumn. abs : absolute value acos : trigonometric", "of, max_cardinality, nbins, h, ax=ax, **style_kwds) # ---# def boxplot(", "If set to True, the intervals will be closed on", "self.parent See Also -------- vDataFrame.add_copy : Creates a copy of", "mode (most occurent element). 0ifnull : 0 when the vColumn", "int, optional Cardinality threshold to use to determine if the", "elements in the vColumn. 
\"\"\" check_types([(\"n\", n, [int, float])]) query", ") query += [lp + tmp_query + rp] query =", "variables.\" ) warnings.warn(warning_message, Warning) else: distinct_elements = self.distinct() expr =", ": Looks if some specific records are in the vDataFrame.", "= max( len(self.parent[column].transformations), max_floor ) except: pass max_floor -= len(self.transformations)", "be numerical\" ) cast = \"::int\" if (self.parent[numcol].isbool()) else \"\"", "= \"COALESCE({}, LAST_VALUE({} IGNORE NULLS) OVER ({} ORDER BY {}))\".format(", ") fd = max(2.0 * (vColumn_075 - vColumn_025) / (count)", "to True, the approximate cardinality is returned. By setting this", "------- vDataFrame The vDataFrame of the search. See Also --------", "kwargs[\"agg\"], self.parent.__genSQL__(), self.alias, ) query_result = executeSQL( query=query, title=\"Computing the", "ISOYEAR / MICROSECONDS / MILLENNIUM / MILLISECONDS / MINUTE /", "kernel. logistic : Logistic kernel. sigmoid : Sigmoid kernel. silverman", "the vColumn by transforming the values lesser than the lower", "bool, optional If set to True, the record will be", "in kwargs): kwargs[\"legend\"] = True if not (\"figsize\" in kwargs):", "Computes the vColumn descriptive statistics. \"\"\" check_types([(\"k\", k, [int, float]),", "self.parent.__genSQL__(), by[0], ), title=\"Computing the different categories {} to normalize.\".format(", "-------- vDataFrame.case_when : Creates a new feature by evaluating some", "gen_tmp_name(schema=schema, name=\"kde\") if isinstance(xlim, (tuple, list)): xlim_tmp = [xlim] else:", "For more information, see utilities.tablesample. See Also -------- vDataFrame.analytic :", "iv_woe.\".format(y) ) self.parent[y].distinct() trans = self.discretize( method=\"same_width\" if self.isnum() else", "values = { \"index\": [item[0] for item in result], \"count\":", "BY _verticapy_cnt_ DESC {}\".format( bin_spatial_to_str(self.category(), self.alias), self.alias, self.parent.shape()[0], self.parent.__genSQL__(), dropna,", "\"mode\") and (val == None): val = self.mode(dropna=True) if val", "{}, \"cramer\": {}, \"biserial\": {}, \"regr_avgx\": {}, \"regr_avgy\": {}, \"regr_count\":", "float, optional Lower bound. upper: float, optional Upper bound. Returns", "if len(args) > 0: column = args[0] elif \"column\" in", "The vColumn '{}' was normalized with the method '{}'.\".format( self.alias,", "executeSQL( query=query, title=\"Computing the quantiles of {}.\".format(self.alias), method=\"fetchrow\", ) if", "( \"start_date\", start_date, [str, datetime.datetime, datetime.date, int, float], ), (", "self mad, med = self.aggregate([\"mad\", \"approx_median\"]).values[self.alias] mad *= 1.4826 if", "if self.isnum() and method == \"smart\": schema = verticapy.options[\"temp_schema\"] if", "/ # / / # \\ / # \\ /", "str = \"\", return_enum_trans: bool = False, ): \"\"\" ---------------------------------------------------------------------------", "---------- n: int, optional Offset. Returns ------- tablesample An object", "plot. The other categories will be filtered. 
ax: Matplotlib axes", "([0, 1], [1, 0]) or self.isbool(): all_new_features = [] prefix", "bool, optional Uses numbers as suffix instead of the vColumns", "vDataFrame.boxplot : Draws the Box Plot of the input vColumns.", "\"topk\"], ), (\"return_enum_trans\", return_enum_trans, [bool]), ] ) method = method.lower()", "\"\"\" --------------------------------------------------------------------------- Extracts a specific TS field from the vColumn", "= 8, cat_priority: list = [], ax=None, **style_kwds, ): \"\"\"", "5). of: str, optional The vColumn to use to compute", "AS count, 100 * COUNT({1}) / {2} AS percent, AVG({3}{4})", "\"\"\" pre_comp = self.parent.__get_catalog_value__(self.alias, \"store_usage\") if pre_comp != \"VERTICAPY_NOT_PRECOMPUTED\": return", "vDataFrame[].describe : Computes the vColumn descriptive statistics. \"\"\" return self.describe(method=\"categorical\",", "\"SELECT {}::{} AS {} FROM {} WHERE {} IS NOT", "values: for i in range(len(values[elem])): if isinstance(values[elem][i], decimal.Decimal): values[elem][i] =", "{} IS NOT NULL GROUP BY 1) x ORDER BY", "optional Labels used to name the new categories. If empty,", "RandomForestRegressor, ) drop(tmp_model_name, method=\"model\") if self.parent[response].category() == \"float\": model =", "\", \".join(by) ) else: new_column = \"COALESCE({}, {}({}) OVER (PARTITION", "a Robust Z-Score - The MAD is null !\".format( self.alias", "\\ / / / # \\ / / / #", "\"\"\" check_types([(\"new_name\", new_name, [str])]) old_name = quote_ident(self.alias) new_name = new_name.replace('\"',", "h, [int, float]), ] ) if of: self.parent.are_namecols_in(of) of =", "equal frequency histogram bins.\", method=\"fetchall\", ) result = [elem[0] for", "-------- vDataFrame.astype : Converts the vColumns to the input type.", "---------- approx: bool, optional If set to True, the approximate", "pre_comp assert n >= 1, ParameterError(\"Parameter 'n' must be greater", "abs(self): \"\"\" --------------------------------------------------------------------------- Applies the absolute value function to the", "the vDataFrame input aggregations. \"\"\" return self.aggregate([\"variance\"]).values[self.alias][0] variance = var", "\" for i in range(1, n): trans += \"WHEN {}", "store usage. See Also -------- vDataFrame.expected_store_usage : Returns the vDataFrame", "x) return self.apply(func=expr) # ---# def astype(self, dtype: str): \"\"\"", "self.category() == \"int\": best_h = max(math.floor(best_h), 1) return best_h #", "{}, \"regr_syy\": {}, } for elem in catalog: self.catalog[elem] =", "ord, SUM(1 - {}) AS non_events, SUM({}) AS events FROM", "round(self, n: int): \"\"\" --------------------------------------------------------------------------- Rounds the vColumn by keeping", "categories. If empty, names will be generated. 
include_lowest: bool, optional", "distinct_count, is_numeric, is_date = ( self.nunique(), self.isnum(), self.isdate(), ) if", "/ ISODOW / ISOWEEK / ISOYEAR / MICROSECONDS / MILLENNIUM", "PARAMETERS percentile = 0.5) AS 'approx_50%', APPROXIMATE_PERCENTILE ({3}{4} USING PARAMETERS", "] ) start_or_end = \"START\" if (start) else \"END\" return", "Parameters ---------- func: str, Function in pure SQL used to", "method == \"topk\": assert k >= 2, ParameterError( \"Parameter 'k'", "in ffill|pad|bfill|backfill then 'order_by' must be a list of at", "\", \".join(by) ), ) else: cmax, cmin = ( \"MAX({})", "* IQR / n ** (1 / 3)] sturges :", "self.parent, columns, method, of, max_cardinality, h, ax=ax, **style_kwds, ) #", "See Also -------- vDataFrame.aggregate : Computes the vDataFrame input aggregations.", "self.parent.__genSQL__(), self.alias ) executeSQL(query, title=\"Testing the Type casting.\") self.transformations +=", "able to look at the final transformation. Returns ------- vDataFrame", "using the input Name. Returns ------- vDataFrame self.parent See Also", "if \"count\" in sauv: self.catalog[\"count\"] = sauv[\"count\"] self.catalog[\"percent\"] = (", "|. # |_)\\/ |_)(_|(_|| \\_/|_|(_||| # / # ____________ ______", "bool = True, ): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using", "\"\"\" --------------------------------------------------------------------------- Draws the box plot of the vColumn. Parameters", "in result if elem[2] != None ] ), ) executeSQL(", "AS verticapy_agg FROM {} WHERE {} IS NOT NULL GROUP", "\"float\" elif method == \"0ifnull\": category, ctype = \"int\", \"bool\"", ": Replaces the regular expression matches in each of the", "under the License is distributed on an \"AS IS\" BASIS,", "Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].apply : Applies", "elements. Parameters ---------- n: int, optional Offset. Returns ------- tablesample", "kernel=kernel, nbins=nbins, xlim=xlim_tmp, store=False, ) try: result = model.fit(self.parent.__genSQL__(), [self.alias]).plot(", "for the discretization. topk : Keeps the topk most frequent", "== \"zscore\": self.catalog[elem] = (sauv[elem] - sauv[\"mean\"]) / sauv[ \"std\"", "(\"h\", h, [int, float]), (\"nbins\", nbins, [int, float]), ] )", "the NULL value. winsorize : Clips the vColumn using as", "self.discretize( method=\"same_width\" if self.isnum() else \"topk\", nbins=nbins, k=nbins, new_category=\"Others\", return_enum_trans=True,", "self.aggregate([\"mad\", \"approx_median\"]).values[self.alias] mad *= 1.4826 if mad != 0: if", "mean skewness : skewness sum : sum std : standard", "/ / # \\ / / / # \\ /", "the regular expression in each record of the vColumn. vDataFrame[].extract", "type. \"\"\" return self.transformations[-1][2] # ---# def clip(self, lower=None, upper=None):", "+= [(new_column, ctype, category)] try: sauv = {} for elem", "\\ \\_____\\ \\ \\_____\\ \\ \\_____\\ \\ \\_____\\ \\ \\_\\", "plot function. For more information, see: https://geopandas.readthedocs.io/en/latest/docs/reference/api/ geopandas.GeoDataFrame.plot.html Returns -------", "= \"NULL\" if mean_1_alpha == None: mean_alpha = \"NULL\" self.apply(", "the MIN and the MAX. MAX = MIN !\".format( self.alias", "def slice(self, length: int, unit: str = \"second\", start: bool", "the vColumn category is date. \"\"\" return self.category() in (\"float\",", "vColumn alias. 
transformations: list, optional List of the different transformations.", "int = 30): \"\"\" --------------------------------------------------------------------------- Returns the k most occurent", "if labels: label = labels[idx] else: label = f\"{close_l}{first_elem};{second_elem}{close_r}\" conditions", "/ / # | \\/ / / / # |______", "self.parent.__genSQL__(), self.alias, self.alias, n ) title = \"Reads {} {}", "new_name: str The new vColumn alias. Returns ------- vDataFrame self.parent", "self.parent.__genSQL__() ), print_time_sql=False, ) except: cmax, cmin = ( \"MAX({})", "self.parent.format_colnames(ts) from verticapy.plot import range_curve_vdf return range_curve_vdf( self, ts, q,", "# ---# def discretize( self, method: str = \"auto\", h:", "(\"h\", h, [int, float]), (\"cat_priority\", cat_priority, [list]), ] ) if", "= self.min() table = \"(SELECT DATEDIFF('second', '{}'::timestamp, {}) AS {}", "function to the vColumn. Parameters ---------- func: str Function to", "\"LIMIT {}\".format(k) dropna = \" WHERE {} IS NOT NULL\".format(self.alias)", "vDataFrame input aggregations. \"\"\" return self.aggregate([\"max\"]).values[self.alias][0] # ---# def mean_encode(self,", "dtype, self.alias, self.parent.__genSQL__(), self.alias ) executeSQL(query, title=\"Testing the Type casting.\")", "2) + 2\". copy_name: str, optional If not empty, a", "of at least one element to use to order the", "column = kwargs[\"column\"] else: check = False if check: self.parent.are_namecols_in(column)", "each of the vColumn records. vDataFrame[].str_count : Computes the number", "({})\".format( \", \".join([\"1\"] + nth_elems + [str(count)]) ) query =", "One vDataFrame can have multiple children vColumns whereas one vColumn", "\"\"\" --------------------------------------------------------------------------- Encodes the vColumn using a user-defined encoding. Parameters", "h, h, h, floor_end ), \"varchar\", \"text\", ) else: trans", "OVER (PARTITION BY {}))\".format( \"{}\", fun, \"{}\", \", \".join(by) )", "len(result) trans = \"(CASE \" for i in range(1, n):", "License. # # |_ |~) _ _| _ /~\\ _", "the vDataFrame input aggregations. \"\"\" return self.aggregate([\"mad\"]).values[self.alias][0] # ---# def", "if of: self.parent.are_namecols_in(of) of = self.parent.format_colnames(of) from verticapy.plot import hist", "(No h will be picked or computed) nbins: int, optional", "pt_events) * ZEROIFNULL(LN(pt_non_events / NULLIFZERO(pt_events))) END AS iv FROM ({})", "self.alias for idx in range(len(breaks) - 1): first_elem, second_elem =", "\"\"\" --------------------------------------------------------------------------- Returns a part of the vColumn (delimited by", "\")\" else: lp, rp = \"\", \"\" for category in", "vColumn using the mean encoding of a response. \"\"\" check_types(", "( 100 * sauv[\"count\"] / self.parent.shape()[0] ) for elem in", "= self.parent.__get_catalog_value__(self.alias, \"store_usage\") if pre_comp != \"VERTICAPY_NOT_PRECOMPUTED\": return pre_comp store_usage", "+= ( \"UNION ALL (SELECT 'Others', SUM(count) FROM (SELECT COUNT(*)", "then 'order_by' must be a list of at least one", "the method 'smart'.\" ) self.parent.are_namecols_in(response) response = self.parent.format_colnames(response) drop(tmp_view_name, method=\"view\")", ": variance Other aggregations could work if it is part", "simplify several processes with its abstractions. 
Parameters ---------- alias: str", "med = self.aggregate([\"mad\", \"approx_median\"]).values[self.alias] mad *= 1.4826 if mad !=", "Catalog of pre-computed aggregations. parent, vDataFrame : Parent of the", "the vColumn {} named {} was added to the vDataFrame.\".format(", "inverse cosine asin : trigonometric inverse sine atan : trigonometric", "return self.category() in (\"float\", \"int\") # ---# def iv_woe(self, y:", "it is used to compute other vColumns. Parameters ---------- add_history:", "of the vColumn. vDataFrame[].extract : Extracts the regular expression in", "category)] try: sauv = {} for elem in self.catalog: sauv[elem]", "set to True, the approximate median is returned. By setting", "plot_median, ax=ax, **style_kwds, ) # ---# def rename(self, new_name: str):", "vColumn. Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].apply :", "WHERE {} IS NOT NULL GROUP BY {} ORDER BY", "result[\"avg\"][0], result[\"std\"][0], threshold ) ) else: p_alpha, p_1_alpha = (", ": Computes the vDataFrame input aggregations. \"\"\" return self.quantile(0.5, approx=approx)", "\"\") assert not (self.parent.is_colname_in(new_name)), NameError( f\"A vColumn has already the", "/\\ ___\\ /\\ __ \\ /\\ \\ /\\ \\/\\ \\", "= 100 self.parent.__add_to_history__( \"[Label Encoding]: Label Encoding was applied to", "\\ \\ \\ \\-./\\ \\ \\ \\ \\-. \\ #", "the License. # You may obtain a copy of the", "2, TypeError( \"vColumn {} must be binary to use iv_woe.\".format(y)", "during the computation. max_cardinality: int, optional Cardinality threshold to use", "(ex: AVG(column1) + 5). ax: Matplotlib axes object, optional The", "plot( self, ts: str, by: str = \"\", start_date: Union[str,", "decimal.Decimal): top = float(top) self.parent.__update_catalog__( {\"index\": [\"top{}\".format(n)], self.alias: [top]} )", "] ) if by: self.parent.are_namecols_in(by) by = self.parent.format_colnames(by) columns =", "self.alias, self.parent.__genSQL__(), self.alias, self.alias, self.alias, ) else: query = \"SELECT", "h, h, \"{}\", h, h, h, floor_end ), \"varchar\", \"text\",", "self.parent.__genSQL__(), self.alias, ) query_result = executeSQL( query=query, title=\"Computing the distinct", "float])]) if offset < 0: offset = max(0, self.parent.shape()[0] -", "**style_kwds) # ---# def boxplot( self, by: str = \"\",", "= self.drop(add_history=False) parent.__add_to_history__( \"[Rename]: The vColumn {} was renamed '{}'.\".format(old_name,", "= len(self.parent[response].transformations) - len( self.transformations ) for k in range(max_floor):", "+ \" END\" self.apply(func=expr) # ---# def ctype(self): \"\"\" ---------------------------------------------------------------------------", "like (date, datetime, timestamp...) or numerical. q: tuple, optional Tuple", "i + 1] ... ELSE argv[n] END Returns ------- vDataFrame", "func) or ( re.search( re.compile(\"\\\\b{}\\\\b\".format(column.replace('\"', \"\"))), func ) ): max_floor", "vColumn (only if the vColumn type is date like). The", "* i] THEN argv[2 * i + 1] ... END", "\"\", by: list = [], order_by: list = [], ):", "optional vColumns used in the partition. return_trans: bool, optimal If", "(elem[0] == None) else \"'{}'\".format(str(elem[0]).replace(\"'\", \"''\")) ) result[idx][1] = \"NULL\"", "(\"return_trans\", return_trans, [bool]), ] ) method = method.lower() self.parent.are_namecols_in(by) by", "Slice size unit. 
For example, it can be 'minute' 'hour'...", "RF Regressor will be trained if the response is numerical", "in (\"mean\", \"median\") or isinstance(val, float): category, ctype = \"float\",", "check = False if check: self.parent.are_namecols_in(column) column = self.parent.format_colnames(column) columns", "TypeError( \"The column 'numcol' must be numerical\" ) cast =", "the dummies ({name}).\\n\" \"It can be the result of using", "None: warning_message = \"The vColumn {} has no mode (only", "BY {0} ORDER BY COUNT(*)\" \" DESC OFFSET {1}) VERTICAPY_SUBTABLE)", "vColumn using the input functions. Parameters ---------- func: list List", "check_types( [ (\"ts\", ts, [str]), (\"by\", by, [str]), (\"start_date\", start_date,", "(\"upper\", upper, [float, int])]) assert (lower != None) or (upper", "= [self.alias] check = True if len(args) > 0: column", "= quote_ident(name.replace('\"', \"_\")) assert name.replace('\"', \"\"), EmptyParameter( \"The parameter 'name'", "already the alias {new_name}.\\nBy changing the parameter 'new_name', you'll be", "of the vColumns 1 and 2 bars. It is only", "in the vDataFrame. \"\"\" if isinstance(val, str) or not (isinstance(val,", "upper bound quantile(1-alpha) if 'use_threshold' is set to False else", "------- ax Matplotlib axes object See Also -------- vDataFrame.bar :", "None: new_column = \"COALESCE({}, '{}')\".format(\"{}\", val) elif expr: new_column =", "Returns the vColumn memory usage. Returns ------- float vColumn memory", "numcol: str, optional Numerical vColumn to use when the parameter", "x: float = 2): \"\"\" --------------------------------------------------------------------------- Applies a default function", "title=title, ) tail.count = self.parent.shape()[0] tail.offset = offset tail.dtype[self.alias] =", "self.catalog[\"median\"] = 0 self.catalog[\"mad\"] = 1 / 1.4826 elif method", "self.apply(func=st.decode(str_sql(\"{}\"), *argv)) # ---# def density( self, by: str =", "datetime.datetime, datetime.date, int, float], ), ( \"end_date\", end_date, [str, datetime.datetime,", "of: str = \"\", max_cardinality: Union[int, tuple] = (6, 6),", "\"prod\": 0, }, ) setattr(self.parent, name, new_vColumn) setattr(self.parent, name.replace('\"', \"\"),", "see utilities.tablesample. See Also -------- vDataFrame.analytic : Adds a new", "self.parent.get_columns(), 0 for column in all_cols: try: if (quote_ident(column) in", "width if the vColumn is numerical or of type date", "] ), ) executeSQL( \"SELECT {}, {} FROM {} LIMIT", "of the vColumn. vDataFrame[].str_slice : Slices the vColumn. \"\"\" check_types([(\"to_replace\",", "{}({})\".format( self.alias, avg, \"NULLIFZERO\" if (nullifzero) else \"\", stddev )", "Computes the vDataFrame Global Outliers. \"\"\" if isinstance(by, str): by", "or greater than quantile(1-alpha) will be filled. Returns ------- vDataFrame", "vDataFrame[].decode : Encodes the vColumn with user defined Encoding. vDataFrame[].discretize", "a time series rule. \"\"\" return self.apply(func=\"DATE_PART('{}', {})\".format(field, \"{}\")) #", "query_result] else: result = ( self.parent.describe( method=\"numerical\", columns=[self.alias], unique=False )", "Dummies]: One hot encoder was applied to the vColumn {}\\n{}", "# data around for processing, VerticaPy brings the logic to", "\"float\", \"float\", ) ] else: warning_message = \"Can not normalize", "used to cut the vColumn. labels: list, optional Labels used", "/ {2} AS percent, AVG({3}{4}) AS mean, STDDEV({3}{4}) AS std,", "Freedman Diaconis and Sturges. 
freedman_diaconis : Freedman Diaconis [2 *", "Computes the vDataFrame input aggregations. \"\"\" return self.aggregate([\"stddev\"]).values[self.alias][0] stddev =", "\"''\") ) ) # ---# def str_slice(self, start: int, step:", "--------------------------------------------------------------------------- Aggregates the vColumn using multiple statistical aggregations: min, max,", "(PARTITION BY {})\".format( self.alias, \", \".join(by) ), ) if return_trans:", "if pre_comp != \"VERTICAPY_NOT_PRECOMPUTED\": if not (dropna) and (pre_comp !=", "method when first preparing your data. Parameters ---------- new_name: str", "- The Standard Deviation is null !\".format( self.alias ) warnings.warn(warning_message,", "except: pass max_floor -= len(self.transformations) if copy_name: self.add_copy(name=copy_name) for k", "if distinct_count > max_cardinality: query += ( \"UNION ALL (SELECT", "Sum of the vColumn 'of'. q% : q Quantile of", "setattr(self.parent, name.replace('\"', \"\"), new_vColumn) self.parent._VERTICAPY_VARIABLES_[\"columns\"] += [name] all_new_features += [name]", "6, nbins: int = 0, h: float = 0, ax=None,", "the vColumn. \"\"\" check_types([(\"n\", n, [int, float])]) query = \"SELECT", "the tail of the vColumn. Parameters ---------- limit: int, optional", "of elements to skip. Returns ------- tablesample An object containing", "Parameters ---------- ts: str TS (Time Series) vColumn to use", "AVG(column1) + 5). ax: Matplotlib axes object, optional The axes", "vDataFrame input aggregations. \"\"\" return self.aggregate([\"avg\"]).values[self.alias][0] mean = avg #", "\"{}\")) else: return self.apply(func=\"{} + ({})\".format(\"{}\", x)) # ---# def", "False rose = True if pie_type == \"rose\" else False", "bool = True, use_numbers_as_suffix: bool = False, ): \"\"\" ---------------------------------------------------------------------------", "= \"public\" tmp_view_name = gen_tmp_name(schema=schema, name=\"view\") tmp_model_name = gen_tmp_name(schema=schema, name=\"model\")", "PARAMETERS model_name = '{}', tree_id = {}, format = 'tabular'))\".format(", "[2 * IQR / n ** (1 / 3)] sturges", "(ORDER BY {}) AS _verticapy_row_nb_ FROM {} WHERE {} IS", "using the MIN and the MAX. 
MAX = MIN !\".format(", "ax=None, **style_kwds, ): \"\"\" --------------------------------------------------------------------------- Draws the spider plot of", "in (1, None), ValueError( \"vColumn doesn't allow slicing having steps", "0.9) AS 'approx_90%', MAX({3}{4}) AS max FROM vdf_table\"\"\".format( category, self.alias,", "\"\"\" --------------------------------------------------------------------------- Filters the vDataFrame where the vColumn is missing.", "(\"by\", by, [str]), (\"start_date\", start_date, [str, datetime.datetime, datetime.date]), (\"end_date\", end_date,", "+ rp] query = \"WITH vdf_table AS (SELECT * FROM", "index = result.values[\"index\"] result = result.values[self.alias] elif (method == \"cat_stats\")", "def isnum(self): \"\"\" --------------------------------------------------------------------------- Returns True if the vColumn is", "aggregate # ---# def apply(self, func: str, copy_name: str =", ") if use_threshold: result = self.aggregate(func=[\"std\", \"avg\"]).transpose().values self.parent.filter( \"ABS({} -", "name = gen_tmp_name(schema=schema, name=\"kde\") if isinstance(xlim, (tuple, list)): xlim_tmp =", "\"acos\", \"asin\", \"atan\", \"cbrt\", \"ceil\", \"cos\", \"cosh\", \"cot\", \"exp\", \"floor\",", "See Also -------- vDataFrame.filter: Filters the data using the input", "AND {column} {op2} '{second_elem}' THEN '{label}'\" ] expr = \"CASE", "[\"auto\", \"smart\", \"same_width\", \"same_freq\", \"topk\"], ), (\"return_enum_trans\", return_enum_trans, [bool]), ]", ") return parent # ---# def round(self, n: int): \"\"\"", "\\/_/ \\/_/ # # # ---# class vColumn(str_sql): \"\"\" ---------------------------------------------------------------------------", "query_result for item in sublist] # ---# def div(self, x:", "method=\"fetchall\", ) result = [elem[0] for elem in result] elif", "list(args) check_types([(\"val\", val, [list])]) val = {self.alias: val} return self.parent.isin(val)", "assert index.step in (1, None), ValueError( \"vColumn doesn't allow slicing", "self.alias, ), \"apply_test_feature\", ) except: ctype = get_data_types( \"SELECT {}", "categorical (No h will be picked or computed) nbins: int,", "\"CASE WHEN \" + \" WHEN \".join(conditions) + \" END\"", "collections.abc import Iterable from typing import Union # VerticaPy Modules", "\"x\"), ) ) return self.parent except Exception as e: raise", "*argv)) # ---# def density( self, by: str = \"\",", "str): order_by = [order_by] check_types( [ ( \"method\", method, [", "life cycle, uses a ‘pipeline’ mechanism to sequentialize # data", "# / __ `\\ / / # | \\/ /", "smallest elements in the vColumn. Parameters ---------- n: int, optional", "\"\"))), func ) ): max_floor = max( len(self.parent[column].transformations), max_floor )", "categorical vColumns. bfill : Back Propagation of the next element", "[val] val += list(args) check_types([(\"val\", val, [list])]) val = {self.alias:", "TS (Time Series) vColumn to use to order the data.", "elements, please use a method in zscore|minmax\" warnings.warn(warning_message, Warning) return", "each of the vColumn record by an input value. The", "to use to convert the vColumn. If this parameter is", "filling.\".format(e)) if total > 0: try: if \"count\" in sauv:", "vColumn. 
Parameters ---------- by: str, optional vColumn to use to", "elements will be gathered together to create a new category", "(only missing values).\\nNothing was filled.\".format( self.alias ) warnings.warn(warning_message, Warning) return", "of {} (response = {}).\".format(self.alias, y) result = to_tablesample(query, title=title)", "(1.4826 * mad) minmax : Normalization using the MinMax (min", "self.parent # ---# def fillna( self, val=None, method: str =", "parameter is used for testing purpose. Returns ------- vDataFrame self.parent", "/ (count) ** (1.0 / 3.0), 1e-99) if method.lower() ==", "\"_\") .replace(\"'\", \"_\") ) expr = \"DECODE({}, '{}', 1, 0)\".format(", "[\"DECODE({}\"] text_info = \"\\n\" for k in range(len(distinct_elements)): expr +=", "set to True, the approximate cardinality is returned. By setting", "can be seen as one column of that relation. vColumns", "prefix_sep, [str]), (\"drop_first\", drop_first, [bool]), (\"use_numbers_as_suffix\", use_numbers_as_suffix, [bool]), ] )", "relation, a vColumn can be seen as one column of", "self.apply(func=\"TIMESTAMPADD(SECOND, -({}), {})\".format(x, \"{}\")) else: return self.apply(func=\"{} - ({})\".format(\"{}\", x))", "vColumn with a user defined Encoding. vDataFrame[].discretize : Discretizes the", "h.\", method=\"fetchrow\", ) count, vColumn_min, vColumn_025, vColumn_075, vColumn_max = result", "sub(self, x: float): \"\"\" --------------------------------------------------------------------------- Subtracts the input element from", ": Computes the vDataFrame input aggregations. \"\"\" check_types([(\"dropna\", dropna, [bool]),", "---# def __getitem__(self, index): if isinstance(index, slice): assert index.step in", "\"UNION ALL (SELECT 'Others', SUM(count) FROM (SELECT COUNT(*) AS count\"", "\"\"\" return self.iloc(limit=limit) # ---# def hist( self, method: str", "index_stop += self.parent.shape()[0] limit = index_stop - index_start if limit", "Forest.\", method=\"fetchall\", ) result = [elem[0] for elem in result]", "return_trans: bool, optimal If set to True, the method will", "around for processing, VerticaPy brings the logic to the data.", "Propagation of the first element (Constant Interpolation). mean : Average.", "limit, offset, ), title=title, ) tail.count = self.parent.shape()[0] tail.offset =", "trans = self.discretize( method=\"same_width\" if self.isnum() else \"topk\", nbins=nbins, k=nbins,", "vDataFrame self.parent See Also -------- vDataFrame[].dropna : Drops the vColumn", "\"START\" if (start) else \"END\" return self.apply( func=\"TIME_SLICE({}, {}, '{}',", "spider as spider_plot return spider_plot( self.parent, columns, method, of, max_cardinality,", "number of vColumn distinct elements to be used as categorical.", "one of the following: date / int / float /", "{} THEN '[{};{}]' \".format( \"{}\", result[i - 1], result[i], result[i", "{})\".format(response, \"{}\"), \"int\", \"float\") ] self.parent.__update_catalog__(erase=True, columns=[self.alias]) self.parent.__add_to_history__( \"[Mean Encode]:", "independent variable in relation to the dependent variable. 
Parameters ----------", "events / NULLIFZERO(SUM(events) OVER ()) AS pt_events FROM ({}) x\".format(", "self.apply(func=\"ROUND({}, {})\".format(\"{}\", n)) # ---# def sem(self): \"\"\" --------------------------------------------------------------------------- Aggregates", "self, method: str = \"auto\", h: float = 0, nbins:", "top{} categories of {}.\".format( k if k > 0 else", "\\_____\\ \\ \\_____\\ \\ \\_____\\ \\ \\_\\ \\ \\_\\ \\", "1 ORDER BY 2 DESC LIMIT {}) VERTICAPY_SUBTABLE ORDER BY", "check_types([(\"dtype\", dtype, [str])]) try: query = \"SELECT {}::{} AS {}", "vDataFrame[].label_encode : Encodes the vColumn with Label Encoding. vDataFrame[].mean_encode :", "mean encoding was successfully done.\") return self.parent # ---# def", "None ] ), ) cmax = \"DECODE({}, {}, NULL)\".format( by[0],", "return. dropna: bool, optional If set to True, NULL values", "occurent elements to return. dropna: bool, optional If set to", "# # |_ |~) _ _| _ /~\\ _ |.", "/ /\\ ___\\ /\\ __ \\ /\\ \\ /\\ \\/\\", "Function in pure SQL used to transform the vColumn. The", "column in self.parent._VERTICAPY_VARIABLES_[\"columns\"] ] force_columns.remove(self.alias) executeSQL( \"SELECT * FROM {}", "isinstance(by, str): by = [by] if isinstance(order_by, str): order_by =", "# |_ |~) _ _| _ /~\\ _ |. #", "the vColumn. Parameters ---------- by: str, optional vColumn to use", "missing. Returns ------- vDataFrame self.parent See Also -------- vDataFrame.filter: Filters", "{})\".format( self.alias, \", \".join(by) ), \"MIN({}) OVER (PARTITION BY {})\".format(", "use to sort the data when using TS methods. Returns", "float])]) assert x != 0, ValueError(\"Division by 0 is forbidden", "\"\"\"(SELECT {0} || '', COUNT(*) FROM vdf_table GROUP BY {0}", "[], order_by: list = [], ): \"\"\" --------------------------------------------------------------------------- Fills missing", "+= [ \"'{}', {}\".format(str(distinct_elements[k]).replace(\"'\", \"''\"), k) ] text_info += \"\\t{}", "---# def str_slice(self, start: int, step: int): \"\"\" --------------------------------------------------------------------------- Slices", "sturges : Sturges [CEIL(log2(n)) + 1] Returns ------- float optimal", "first preparing your data. Parameters ---------- new_name: str The new", "this parameter will increase the precision but will also increase", "/ # \\ / / / # \\_______/ / /", "function's performance can drastically decrease. Returns ------- float/str median See", "max_cardinality: int, optional Cardinality threshold to use to determine if", "] ) self.parent.are_namecols_in(ts) ts = self.parent.format_colnames(ts) if by: self.parent.are_namecols_in(by) by", "vColumn using an input 'quantile'. Parameters ---------- x: float A", ") values = to_tablesample(query, title=title).values elif ( ((distinct_count < max_cardinality", "for item in query_result] index = [\"unique\", \"count\"] + [item[0]", "based on an aggregation. \"\"\" if isinstance(pie_type, str): pie_type =", "\"\"\" return self.aggregate(func=[\"prod\"]).values[self.alias][0] prod = product # ---# def quantile(self,", "order the data. The vColumn type must be date like", "self.parent.__add_to_history__( \"[Apply]: The vColumn '{}' was transformed with the func", "= avg # ---# def bar( self, method: str =", "bool, optional Drops the first dummy to avoid the creation", ": trigonometric tangent tanh : hyperbolic tangent x: int/float, optional", "this method when first preparing your data. 
Parameters ---------- new_name:", "transformations = self.transformations + [(expr, \"bool\", \"int\")] new_vColumn = vColumn(", "[], self.alias for idx in range(len(breaks) - 1): first_elem, second_elem", "def discretize( self, method: str = \"auto\", h: float =", "most relevant interval to use for the discretization. topk :", "order_by: list = [], ): \"\"\" --------------------------------------------------------------------------- Fills missing elements", "the precision but will also increase the time of the", "start_date, end_date, area, step, ax=ax, **style_kwds, ) # ---# def", "If set to True, the approximate quantile is returned. By", "the right. Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].apply", "freedman_diaconis : Freedman Diaconis [2 * IQR / n **", "str_count(self, pat: str): \"\"\" --------------------------------------------------------------------------- Computes the number of matches", "self.alias, len(all_new_features), conj, \", \".join(all_new_features) ) + \".\" ) return", "/\\ __ \\ /\\ \\ /\\ \\/\\ \\ /\\ \"-./", "Number of nbins. If empty, an optimized number of nbins", "AS (SELECT * FROM {}) (SELECT AVG({}) FROM vdf_table WHERE", "the upper bound to the upper bound itself. Parameters ----------", "the vColumn 'of'. sum : Sum of the vColumn 'of'.", "q: tuple, optional Tuple including the 2 quantiles used to", "COUNT(*) FROM vdf_table GROUP BY {0} ORDER BY COUNT(*) DESC", "null : Replaces the outliers by the NULL value. winsorize", "scoring phases. xlim: tuple, optional Set the x limits of", "{}))\".format( \"{}\", fun, \"{}\", \", \".join(by) ) else: new_column =", "end_date, [str, datetime.datetime, datetime.date, int, float], ), (\"plot_median\", plot_median, [bool]),", "second_elem = breaks[idx], breaks[idx + 1] if right: op1, op2,", "computed) h: float, optional Interval width of the bar. If", "[ Line2D( [0], [0], color=updated_dict(param, style_kwds, idx)[\"color\"], lw=4, ), ]", "Fouad are in the vColumn. You can write the following", "(\"color\" in kwargs): from verticapy.plot import gen_colors kwargs[\"color\"] = gen_colors()[0]", "{}) UNION ALL (SELECT AVG({}) FROM vdf_table WHERE {} >", "\"median\", \"ffill\", \"pad\", \"bfill\", \"backfill\", ], ), (\"expr\", expr, [str]),", "cosine cot : trigonometric cotangent exp : exponential function floor", ") ) if verticapy.options[\"print_info\"]: print(\"The mean encoding was successfully done.\")", "function floor : value down to the next whole number", "1 that represents the quantile. For example: 0.25 represents Q1.", "vColumn. vDataFrame[].label_encode : Encodes the vColumn with Label Encoding. vDataFrame[].get_dummies", "isinstance(order_by, str): order_by = [order_by] check_types( [ ( \"method\", method,", "vdf_table WHERE {0} IS NOT NULL GROUP BY {0} ORDER", "---# def __nonzero__(self): return self.count() > 0 # ---# def", "vDataFrame.aggregate : Computes the vDataFrame input aggregations. \"\"\" return self.aggregate([\"sem\"]).values[self.alias][0]", "mode : most occurent element percent : percent of non-missing", "---# def count(self): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'count'", "verticapy from verticapy.utilities import * from verticapy.toolbox import * from", "the input type. Parameters ---------- dtype: str New type. Returns", "records by an input value. 
\"\"\" check_types([(\"start\", start, [int, float]),", "item in self.transformations], catalog=self.catalog, ) setattr(self.parent, name, new_vColumn) setattr(self.parent, name[1:-1],", "use \"POWER({}, 2) + 2\". copy_name: str, optional If not", "replace. value: str, optional New value. Returns ------- vDataFrame self.parent", "object Any amount of expressions. The expression generated will look", "+= [(expr, \"int\", \"int\")] self.parent.__update_catalog__(erase=True, columns=[self.alias]) self.catalog[\"count\"] = self.parent.shape()[0] self.catalog[\"percent\"]", "For more information, see utilities.tablesample. See Also -------- vDataFrame[].nsmallest :", "the 2 quantiles used to draw the Plot. start_date: str", "[int, float]), (\"numcol\", numcol, [str]), ] ) method = method.lower()", "drop(tmp_view_name, method=\"view\") drop(tmp_model_name, method=\"model\") result = [self.min()] + result +", "( 100 * sauv[\"count\"] / self.parent.shape()[0] ) except: pass self.parent.__add_to_history__(", "\"public\" tmp_view_name = gen_tmp_name(schema=schema, name=\"view\") tmp_model_name = gen_tmp_name(schema=schema, name=\"model\") assert", "of moving # data around for processing, VerticaPy brings the", ": Combination of Freedman Diaconis and Sturges. freedman_diaconis : Freedman", "gen_colors() if not xlim: xmin = self.min() xmax = self.max()", "== \"cat_stats\") and (numcol != \"\"): numcol = self.parent.format_colnames(numcol) assert", "x)) # ---# def add_copy(self, name: str): \"\"\" --------------------------------------------------------------------------- Adds", "1\".format( self.alias, self.alias, self.parent.__genSQL__(), where, self.alias, n ), title=\"Computing the", "= \"\"\"SELECT '{0}' AS 'index', COUNT({1}) AS count, 100 *", "Filters the vDataFrame where the vColumn is missing. Returns -------", "if method not in [\"mode\", \"0ifnull\"]: max_floor = 0 all_partition", "else \"\", self.alias ), method=\"fetchall\", ) values = { \"index\":", "or not (labels), ParameterError( \"Length of parameter breaks must be", "def memory_usage(self): \"\"\" --------------------------------------------------------------------------- Returns the vColumn memory usage. Returns", "= \"density\", of: str = \"\", max_cardinality: int = 6,", "fun, \"{}\", \", \".join(by) ) else: new_column = \"COALESCE({}, {}({})", "specific language governing permissions and # limitations under the License.", "FROM {}\".format( bin_spatial_to_str(self.category(), self.alias), self.parent.__genSQL__(), ), title=\"Computing the Store Usage", "to use to fill the vColumn outliers. mean : Replaces", "tmp_model_name = gen_tmp_name(schema=schema, name=\"model\") assert nbins >= 2, ParameterError( \"Parameter", "float sem See Also -------- vDataFrame.aggregate : Computes the vDataFrame", "Exception as e: raise QueryError( \"{}\\nError when applying the func", "\"cot\", \"exp\", \"floor\", \"ln\", \"log\", \"log10\", \"mod\", \"pow\", \"round\", \"sign\",", "[float, int]), (\"upper\", upper, [float, int])]) assert (lower != None)", "you want to normalize by grouping by elements, please use", "preparing your data. Parameters ---------- new_name: str The new vColumn", "object containing the result. For more information, see utilities.tablesample. See", ") model.drop() return result except: model.drop() raise # ---# def", "\"\"\" --------------------------------------------------------------------------- Returns the vColumn DB type. Returns ------- str", "---------- start: int Start of the slicing. 
step: int Size", ": arithmetic square root tan : trigonometric tangent tanh :", "------- vDataFrame self.parent See Also -------- vDataFrame.filter: Filters the data", "kwargs): kwargs[\"legend\"] = True if not (\"figsize\" in kwargs): kwargs[\"figsize\"]", "the Bar Chart of the input vColumns based on an", "...), the parameter 'x' will represent the number of seconds,", "self.isdate(), TypeError( \"cut only works on numerical / date-like vColumns.\"", "aggregation. h: int/float/tuple, optional Interval width of the vColumns 1", "------- bool True if the vColumn is numerical. See Also", "self.count() if method not in [\"mode\", \"0ifnull\"]: max_floor = 0", "lesser than quantile(alpha) or greater than quantile(1-alpha) will be dropped.", "input aggregations. \"\"\" return self.aggregate([\"sum\"]).values[self.alias][0] # ---# def tail(self, limit:", "column for each vColumn category. In this case, the parameter", "must not be empty\" ) assert not (self.parent.is_colname_in(name)), NameError( f\"A", "__setattr__(self, attr, val): self.__dict__[attr] = val # # Methods #", "cmax, cmin = ( \"MAX({}) OVER (PARTITION BY {})\".format( self.alias,", "* i] THEN argv[2 * i + 1] ... ELSE", "timestamp...) or numerical. q: tuple, optional Tuple including the 2", "enough values to compute the Equal Frequency discretization\" ) total,", "WHERE {} IS NOT NULL LIMIT 20\".format( self.alias, dtype, self.alias,", "Uses popular numerical aggregations during the computation. max_cardinality: int, optional", "error of the mean skewness : skewness sum : sum", "type iqr : interquartile range kurtosis : kurtosis jb :", "+= [\"total\"] result.values[\"non_events\"] += [sum(result[\"non_events\"])] result.values[\"events\"] += [sum(result[\"events\"])] result.values[\"pt_non_events\"] +=", "method == \"same_freq\") or ( self.isnum() and method == \"smart\"", "different transformations. Each transformation must be similar to the following:", "= {}, format = 'tabular'))\".format( tmp_model_name, i ) for i", "using 'aad' (Average Absolute Deviation). Returns ------- float aad See", "float]), ] ) if of: self.parent.are_namecols_in(of) of = self.parent.format_colnames(of) from", "conditions. vDataFrame[].discretize : Discretizes the vColumn. vDataFrame[].label_encode : Encodes the", "applied to the vColumn {} using the following mapping:{}\".format( self.alias,", "0: warning_message = \"Can not normalize {} using the MIN", "h: float = 0, pie_type: str = \"auto\", ax=None, **style_kwds,", "number of matches for the regular expression in each record", "int, optional Offset. Returns ------- tablesample An object containing the", "= [], self.alias for idx in range(len(breaks) - 1): first_elem,", "None) else str(elem[1]) new_column = \"COALESCE({}, DECODE({}, {}, NULL))\".format( \"{}\",", "try: result = executeSQL( \"SELECT {}, AVG({}), STDDEV({}) FROM {}", "self.transformations += [(\"{}\", self.ctype(), self.category())] self.transformations += [(func, ctype, category)]", "range : difference between the max and the min sem", "topk : kth most occurent element (ex: top1 for the", "vColumn and creating a copy with the specified name. 
\\u26A0", "axes object See Also -------- vDataFrame[].hist : Draws the histogram", "self.quantile(0.5, approx=approx) # ---# def memory_usage(self): \"\"\" --------------------------------------------------------------------------- Returns the", "self, threshold: float = 4.0, use_threshold: bool = True, alpha:", "to solve this \" \"issue.\" ) for k in range(len(distinct_elements)", "prefix: str = \"\", prefix_sep: str = \"_\", drop_first: bool", "[ column for column in self.parent._VERTICAPY_VARIABLES_[\"columns\"] ] force_columns.remove(self.alias) executeSQL( \"SELECT", "vDataFrame[].hist : Draws the histogram of the vColumn based on", "= gen_cmap()[0] else: if not (\"color\" in kwargs): from verticapy.plot", "{}{}) VERTICAPY_SUBTABLE\".format( self.alias, self.parent.__genSQL__(), self.parent.__get_last_order_by__(), index_start, limit, ) return vDataFrameSQL(query)", "vColumn type must be date like (date, datetime, timestamp...) or", "parent.__add_to_history__( \"[Rename]: The vColumn {} was renamed '{}'.\".format(old_name, new_name) )", "the vColumn descriptive statistics. \"\"\" return self.describe(method=\"categorical\", max_cardinality=k) # ---#", "p_1_alpha = ( self.parent.quantile([alpha, 1 - alpha], [self.alias]) .transpose() .values[self.alias]", "Copyright [2018-2022] Micro Focus or one of its affiliates. #", "numerical for Normalization\") return self.parent # ---# def nsmallest(self, n:", "vColumn has already the alias of one of the dummies", "\"Length of parameter breaks must be equal to the length", "{}) VERTICAPY_SUBTABLE ORDER BY split_value::float\".format( \" UNION ALL \".join(query), nbins", "), title=\"Computing the different categories {} to normalize.\".format( by[0] ),", "(start) else \"END\" return self.apply( func=\"TIME_SLICE({}, {}, '{}', '{}')\".format( \"{}\",", "sauv[elem] return self.parent # ---# def geo_plot(self, *args, **kwargs): \"\"\"", "List of values used to cut the vColumn. labels: list,", "bins used for the discretization (must be > 1) k:", "use to impute the vColumn. method: dict, optional Method to", "avg, stddev = self.aggregate([\"avg\", \"std\"]).values[self.alias] if stddev == 0: warning_message", "== 1) and (self.parent[by[0]].nunique() < 50): try: if fun ==", "nbins: int = 0, h: float = 0, ax=None, **style_kwds,", "a new category : 'Others'. cat_priority: list, optional List of", "most occurent element (ex: top1 for the mode) topk_percent :", "= executeSQL( query=query, title=\"Computing the distinct categories of {}.\".format(self.alias), method=\"fetchall\",", "encoding was successfully done.\") return self.parent # ---# def median(", "\"approx_unique\": 2, \"prod\": 0, }, ) setattr(self.parent, name, new_vColumn) setattr(self.parent,", "x -> x^2 + 2 use \"POWER({}, 2) + 2\".", ") executeSQL(query, title=\"Testing the Type casting.\") self.transformations += [ (", "== \"int\") else \"\" if (h > 1) or (self.category()", "_____________ / / # \\ / / / # \\", "the vColumn 'of'. q% : q Quantile of the vColumn", "6) if self.category() == \"int\": h = int(max(math.floor(h), 1)) floor_end", "result[idx][1] = \"NULL\" if (elem[1] == None) else str(elem[1]) new_column", "new vColumn alias. Returns ------- vDataFrame self.parent See Also --------", "regular expression in each record of the vColumn. 
vDataFrame[].str_slice :", "The vColumn {} was renamed '{}'.\".format(old_name, new_name) ) return parent", "self.apply(func=\"TIMESTAMPADD(SECOND, {}, {})\".format(x, \"{}\")) else: return self.apply(func=\"{} + ({})\".format(\"{}\", x))", "[] while total < int(float(count / int(nbins))) * int(nbins): nth_elems", "# ---# def describe( self, method: str = \"auto\", max_cardinality:", "min) / (max - min) by: list, optional vColumns used", "assert (lower != None) or (upper != None), ParameterError( \"At", "to 'numerical' if the vColumn is numerical , 'categorical' otherwise.", "elements to be used as categorical (No h will be", "as categorical. numcol: str, optional Numerical vColumn to use when", "\"COALESCE({}, {})\".format(\"{}\", val) elif (len(by) == 1) and (self.parent[by[0]].nunique() <", "Applies a function to the input vColumn. \"\"\" return self.apply(func=\"ABS({})\")", "response. \"\"\" check_types( [ (\"prefix\", prefix, [str]), (\"prefix_sep\", prefix_sep, [str]),", "result. For more information, see utilities.tablesample. See Also -------- vDataFrame[].nsmallest", "A RF Regressor will be trained if the response is", ": Uses method 'same_width' for numerical vColumns, cast the other", "the vColumn is boolean. See Also -------- vDataFrame[].isdate : Returns", "+ {}{}) || ']'\".format( \"{}\", h, h, \"{}\", h, h,", "by: self.parent.are_namecols_in(by) by = self.parent.format_colnames(by) from verticapy.plot import gen_colors from", "boolean. vDataFrame[].isnum : Returns True if the vColumn is numerical.", "(method in (\"ffill\", \"pad\")) else \" DESC\" partition_by = (", "self.isnum() and method == \"smart\" ): n = len(result) trans", "self.transformations[-1][1].lower() dtype = ctype # ---# def date_part(self, field: str):", "most occurent elements. \"\"\" if \"agg\" not in kwargs: query", "for elem in catalog: self.catalog[elem] = catalog[elem] # ---# def", "(prefix) else prefix.replace('\"', \"_\") + prefix_sep.replace('\"', \"_\") ) n =", "FROM vdf_table WHERE {} < {}) UNION ALL (SELECT AVG({})", "ParameterError( \"Parameter 'nbins' must be greater or equals to 2", "not (isinstance(cat_priority, Iterable)): cat_priority = [cat_priority] check_types( [ (\"by\", by,", "tuple = None, ax=None, **style_kwds, ): \"\"\" --------------------------------------------------------------------------- Draws the", "\"bool\", \"int\")] new_vColumn = vColumn( name, parent=self.parent, transformations=transformations, catalog={ \"min\":", "if elem[2] != None else \"NULL\", ) for elem in", "[\"gaussian\", \"logistic\", \"sigmoid\", \"silverman\"]), (\"bandwidth\", bandwidth, [int, float]), (\"nbins\", nbins,", "cmin, cmax = self.aggregate([\"min\", \"max\"]).values[self.alias] if cmax - cmin ==", "if (n == 1) else str(int(n)) if isinstance(top, decimal.Decimal): top", "\\ /\\ \\/\\ \\ /\\ \"-./ \\ /\\ \"-.\\ \\", "self.parent if isinstance(val, str): val = val.replace(\"'\", \"''\") if val", "Number of most occurent elements to return. Returns ------- tablesample", "> 0: warning_message = \"The method 'robust_zscore' is available only", "AS pt_events FROM ({}) x\".format( self.alias, query, ) query =", "where each key corresponds to an aggregation. vColumns will memorize", "return executeSQL( query=query, title=\"Getting the vColumn element.\", method=\"fetchfirstelem\", ) else:", "ConversionError( \"{}\\nThe vColumn {} can not be converted to {}\".format(", "index): if isinstance(index, slice): assert index.step in (1, None), ValueError(", "example: 0.25 represents Q1. 
# (c) Copyright [2018-2022] Micro Focus or one of its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# [VerticaPy ASCII-art banner]
#
# VerticaPy supports the entire data science life cycle, uses a
# 'pipeline' mechanism to sequentialize data transformation operations,
# and offers beautiful graphical options. The idea is simple: instead of
# moving data around for processing, VerticaPy brings the logic to the
# data.
#
# Modules
#
# Standard Python Modules
import math, re, decimal, datetime, sys, warnings
from collections.abc import Iterable
from typing import Union

# ---#
class vColumn:
    """
---------------------------------------------------------------------------
Python object that stores all user transformations. If the vDataFrame
represents the entire relation, a vColumn can be seen as one column of
that relation. vColumns simplify several processes with their
abstractions.

Parameters
----------
alias: str
    vColumn alias.
transformations: list, optional
    List of the different transformations. Each transformation is stored
    as a tuple (SQL expression, data type, category).
parent: vDataFrame, optional
    Parent of the vColumn. One vDataFrame can have multiple children
    vColumns.
catalog: dict, optional
    Catalog where each key corresponds to an aggregation. vColumns
    memorize already computed aggregations to increase performance.

Attributes
----------
    alias, str           : vColumn alias.
    catalog, dict        : Catalog of already computed aggregations.
    parent, vDataFrame   : Parent of the vColumn.
    transformations, str : List of the different transformations.
    """

    # Methods fall into four families. Aggregations return a Python scalar
    # or a tablesample; plotting methods return a Matplotlib axes object;
    # transformations and most utilities return self.parent so that calls
    # can be chained on the vDataFrame.
    #
    # Aggregations: aad, aggregate/agg, avg/mean, count, describe, iv_woe,
    #   kurtosis/kurt, mad, max, median, min, mode, nunique (exact or
    #   approximate), prod, quantile, sem, skewness, std/stddev, sum, topk,
    #   value_counts, var.
    # Transformations and encodings: abs, add, apply, apply_fun, astype,
    #   clip, cut, date_part, decode, discretize, div, fill_outliers,
    #   fillna, get_dummies (One-Hot Encoding), label_encode, mean_encode,
    #   mul, normalize (zscore / robust_zscore / minmax), round, slice,
    #   str_contains, str_count, str_extract, str_replace, str_slice, sub.
    # Plotting: bar, boxplot, density, donut, geo_plot, hist, pie, plot,
    #   range_plot, spider.
    # Utilities: add_copy, category, ctype/dtype, distinct, drop,
    #   drop_outliers, dropna, head, iloc, isbool, isdate, isin, isnum,
    #   memory_usage, nlargest, nsmallest, numh (optimal histogram
    #   interval), rename, store_usage, tail.
Parameters ---------- limit: int, optional Number of elements to", "datetime.date]), (\"end_date\", end_date, [str, datetime.datetime, datetime.date]), (\"area\", area, [bool]), (\"step\",", "{}, \"spearmand\": {}, \"kendall\": {}, \"cramer\": {}, \"biserial\": {}, \"regr_avgx\":", "\"Parameter 'k' must be greater or equals to 2 in", "NULL value. winsorize : Clips the vColumn using as lower", "and 2 to be used as categorical (No h will", "return self.aggregate([\"max\"]).values[self.alias][0] # ---# def mean_encode(self, response: str): \"\"\" ---------------------------------------------------------------------------", "[list]), (\"include_lowest\", include_lowest, [bool]), (\"right\", right, [bool]), ] ) assert", "GROUP BY {} ORDER BY {}\".format( bin_spatial_to_str(self.category(), self.alias), self.alias, self.parent.__genSQL__(),", "/ / / # |______ / / / # |____/", "self.alias.replace('\"', \"\") ) ) # ---# def apply_fun(self, func: str,", "self.aggregate([\"sum\"]).values[self.alias][0] # ---# def tail(self, limit: int = 5): \"\"\"", "for elem in sauv: self.catalog[elem] = sauv[elem] return self.parent #", "(method != \"numerical\")) or not (is_numeric) or (method == \"categorical\")", "an aggregation. \"\"\" check_types( [ (\"by\", by, [str]), (\"method\", method,", "(Variance). Returns ------- float var See Also -------- vDataFrame.aggregate :", "{} FROM (SELECT {} AS {}, {} AS verticapy_agg FROM", "See Also -------- vDataFrame[].nsmallest : Returns the n smallest elements", "args[0] elif \"column\" in kwargs: column = kwargs[\"column\"] else: check", "breaks must be equal to the length of parameter 'labels'", "vColumn and it returns the new vDataFrame of the search.", "def sub(self, x: float): \"\"\" --------------------------------------------------------------------------- Subtracts the input element", "\"License\"); # You may not use this file except in", "ts = self.parent.format_colnames(ts) if by: self.parent.are_namecols_in(by) by = self.parent.format_colnames(by) from", "self.apply(func=\"{} / ({})\".format(\"{}\", x)) # ---# def drop(self, add_history: bool", "4) elif h > 0.000001: h = round(h, 6) if", "executeSQL( query=query, title=\"Computing the distinct categories of {}.\".format(self.alias), method=\"fetchall\", )", "kurt = kurtosis # ---# def label_encode(self): \"\"\" --------------------------------------------------------------------------- Encodes", "value down to the next whole number ln : natural", "method, [\"zscore\", \"robust_zscore\", \"minmax\"]), (\"by\", by, [list]), (\"return_trans\", return_trans, [bool]),", "cosine asin : trigonometric inverse sine atan : trigonometric inverse", "check_types( [ (\"breaks\", breaks, [list]), (\"labels\", labels, [list]), (\"include_lowest\", include_lowest,", "/ YEAR Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].slice", "list, optional vColumns used in the partition. order_by: list, optional", "See Also -------- vDataFrame[].isbool : Returns True if the vColumn", "1\") where = \" WHERE {} IS NOT NULL \".format(self.alias)", "using the input method. 
Parameters ---------- method: str, optional The", "\"''\"), k) ] text_info += \"\\t{} => {}\".format(distinct_elements[k], k) expr", "\"\"\" --------------------------------------------------------------------------- Returns the k most occurent elements and their", "sys total = ( sys.getsizeof(self) + sys.getsizeof(self.alias) + sys.getsizeof(self.transformations) +", "if elem[1] != None else \"NULL\", ) for elem in", "\"cat_stats\") or (numcol), ParameterError( \"The parameter 'numcol' must be a", "= -1, dropna: bool = True): \"\"\" --------------------------------------------------------------------------- Returns the", "transform the vColumn. The function variable must be composed of", "\".join(expr) + \", {})\".format(len(distinct_elements)) self.transformations += [(expr, \"int\", \"int\")] self.parent.__update_catalog__(erase=True,", "n == 0: nullifzero = 0 avg, stddev = self.aggregate([\"avg\",", "\"issue.\" ) for k in range(len(distinct_elements) - n): name =", "the data. method: str, optional The method to use to", "title=\"Testing the Type casting.\") self.transformations += [ ( \"{}::{}\".format(\"{}\", dtype),", "if not (dropna): n = \"\" if (n == 1)", "sauv[\"min\"] ) except: pass if method == \"robust_zscore\": self.catalog[\"median\"] =", "See Also -------- vDataFrame[].decode : Encodes the vColumn with a", "(\"numcol\", numcol, [str]), ] ) method = method.lower() assert (method", "vColumn. \"\"\" check_types([(\"x\", x, [int, float])]) assert x != 0,", "vColumn missing values. \"\"\" if isinstance(by, str): by = [by]", "] ) if use_threshold: result = self.aggregate(func=[\"std\", \"avg\"]).transpose().values p_alpha, p_1_alpha", "!= \"VERTICAPY_NOT_PRECOMPUTED\": if not (dropna) and (pre_comp != None): return", "__|_. _ _ |_) # \\/ (/_| | |(_(_|| \\/", "str = \"density\", of: str = \"\", max_cardinality: Union[int, tuple]", "= False, ax=None, **style_kwds, ): \"\"\" --------------------------------------------------------------------------- Draws the range", "solve this \" \"issue.\" ) for k in range(len(distinct_elements) -", "be a cutomized aggregation (ex: AVG(column1) + 5). ax: Matplotlib", "self, method, of, max_cardinality, h, donut, rose, ax=None, **style_kwds, )", "Response vColumn. Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].decode", "Returns the head of the vColumn. vDataFrame[].tail : Returns the", "y: str, nbins: int = 10): \"\"\" --------------------------------------------------------------------------- Computes the", "!= None) or (upper != None), ParameterError( \"At least 'lower'", "= \"SELECT {}, {}({}) FROM {} GROUP BY {};\".format( by[0],", "'labels' must be empty.\" ) conditions, column = [], self.alias", "\"''\")) ) # ---# def str_replace(self, to_replace: str, value: str", "def str_extract(self, pat: str): \"\"\" --------------------------------------------------------------------------- Extracts the regular expression", "TypeError( \"vColumn {} must be binary to use iv_woe.\".format(y) )", "See Also -------- vDataFrame.topk : Returns the vColumn most occurent", "h, ax=ax, **style_kwds, ) # ---# def std(self): \"\"\" ---------------------------------------------------------------------------", "to keep after the comma. 
Returns ------- vDataFrame self.parent See", "float, optional Uses the Gaussian distribution to define the outliers.", "self.alias), self.alias, self.parent.shape()[0], self.parent.__genSQL__(), dropna, self.alias, topk, ) result =", "+= [self.alias] if add_history: self.parent.__add_to_history__( \"[Drop]: vColumn {} was deleted", "Draws the Bar Chart of the input vColumns based on", "if idx == 0 and include_lowest: op1, close_l = \"<=\",", "whereas one vColumn can only have one parent. catalog: dict,", "-------- vDataFrame[].ctype : Returns the vColumn database type. \"\"\" return", "e: self.transformations = [elem for elem in copy_trans] raise QueryError(\"{}\\nAn", "If not empty, a copy will be created using the", "1: lp, rp = \"(\", \")\" else: lp, rp =", "method on the vColumn \" \"or simply because of ambiguous", "response. \"\"\" check_types( [ (\"RFmodel_params\", RFmodel_params, [dict]), (\"return_enum_trans\", return_enum_trans, [bool]),", "ParameterError( \"If the method is in ffill|pad|bfill|backfill then 'order_by' must", "stored in Vertica, taking advantage Vertica’s # speed and built-in", "Attributes ---------- alias, str : vColumn alias. catalog, dict :", "by grouping by elements, please use a method in zscore|minmax\"", "length: int Slice size. unit: str, optional Slice size unit.", "\"regr_intercept\": {}, \"regr_r2\": {}, \"regr_slope\": {}, \"regr_sxx\": {}, \"regr_sxy\": {},", "CASE ... WHEN vColumn = argv[2 * i] THEN argv[2", "expr = \", \".join(expr) + \", {})\".format(len(distinct_elements)) self.transformations += [(expr,", "--------------------------------------------------------------------------- Returns True if the vColumn is numerical, False otherwise.", "as upper bound quantile(1-alpha) if 'use_threshold' is set to False", "self.alias, self.alias, self.alias, self.parent.__genSQL__(), self.alias, where, ) result = executeSQL(", "# \\/_/ \\/_____/ \\/_____/ \\/_____/ \\/_____/ \\/_/ \\/_/ \\/_/ \\/_/", "greater than November 1993 the 3rd. area: bool, optional If", "equal to the length of parameter 'labels' + 1 or", "len(self.parent[elem].transformations) max_floor -= len(self.transformations) for k in range(max_floor): self.transformations +=", "add_copy(self, name: str): \"\"\" --------------------------------------------------------------------------- Adds a copy vColumn to", "Exception as e: self.transformations = [elem for elem in copy_trans]", "of the vColumns to use to sort the data when", "that represents the quantile. For example: 0.25 represents Q1. approx:", "i in range(len(result)): if result[i][2] == None: pass elif math.isnan(result[i][2]):", "The interval size to convert to use to convert the", "(\"drop_first\", drop_first, [bool]), (\"use_numbers_as_suffix\", use_numbers_as_suffix, [bool]), ] ) distinct_elements =", "{}) * {} || ';' || (FLOOR({} / {}) *", "vDataFrame input aggregations. \"\"\" return self.aggregate([\"sem\"]).values[self.alias][0] # ---# def skewness(self):", "Applies a function to the input vColumn. \"\"\" check_types([(\"n\", n,", "self.alias, cast, self.parent.__genSQL__(), self.parent.__get_last_order_by__(), index, ) return executeSQL( query=query, title=\"Getting", "NOT NULL LIMIT 0\".format( func.replace(\"{}\", self.alias), self.parent.__genSQL__(), self.alias, ), \"apply_test_feature\",", "of the vColumn. The category will be one of the", "+= ( \" WHERE {} IS NULL\".format(self.alias) if (category in", "the vColumn is null, 1 otherwise. 
expr: str, optional SQL", "return self.apply(func=\"ROUND({}, {})\".format(\"{}\", n)) # ---# def sem(self): \"\"\" ---------------------------------------------------------------------------", "return self.aggregate(func=[\"prod\"]).values[self.alias][0] prod = product # ---# def quantile(self, x:", "), method=\"fetchall\", ) ] if mean_alpha == None: mean_alpha =", "check_types( [ (\"by\", by, [str]), (\"kernel\", kernel, [\"gaussian\", \"logistic\", \"sigmoid\",", "the vColumn by dropping the current vColumn and creating a", "(isinstance(lower, (float, int))) else \"\" ) upper_when = ( \"WHEN", "(use_numbers_as_suffix) else '\"{}{}\"'.format( prefix, str(distinct_elements[k]).replace('\"', \"_\") ) ) assert not", "-------- vDataFrame.aggregate : Computes the vDataFrame input aggregations. \"\"\" check_types([(\"approx\",", "[int, float])]) return self.apply(func=\"SUBSTR({}, {}, {})\".format(\"{}\", start, step)) # ---#", "BY {}))\".format( \"{}\", fun, \"{}\", \", \".join(by) ) elif method", "COUNT({1}) AS count, 100 * COUNT({1}) / {2} AS percent,", "categorical. numcol: str, optional Numerical vColumn to use when the", "with the func 'x -> {}'.\".format( self.alias.replace('\"', \"\"), func.replace(\"{}\", \"x\"),", "= 0 if index_start < 0: index_start += self.parent.shape()[0] if", "self.min()) * 1.01 / nbins if h > 0.01: h", "the {}'s lower and upper outliers.\".format( self.alias ), method=\"fetchall\", )", "in sauv: self.catalog[\"count\"] = int(sauv[\"count\"]) + total self.catalog[\"percent\"] = (", "allow slicing having steps different than 1.\" ) index_stop =", "ValueError( \"vColumn doesn't allow slicing having steps different than 1.\"", "result] ), ) executeSQL( \"SELECT {} FROM {} LIMIT 1\".format(", "--------------------------------------------------------------------------- Draws the histogram of the vColumn based on an", "--------------------------------------------------------------------------- Rounds the vColumn by keeping only the input number", "copy of the vColumn {} named {} was added to", "percentile = 0.75) AS 'approx_75%', APPROXIMATE_PERCENTILE ({3}{4} USING PARAMETERS percentile", "the outliers by the NULL value. winsorize : Clips the", "smallest elements in the vColumn. \"\"\" check_types([(\"n\", n, [int, float])])", "list, *args): \"\"\" --------------------------------------------------------------------------- Looks if some specific records are", "self.alias), self.parent.__genSQL__(), self.alias, ), \"apply_test_feature\", ) except: ctype = get_data_types(", "= gen_colors() if not xlim: xmin = self.min() xmax =", "\"pad\")) else \" DESC\" partition_by = ( \"PARTITION BY {}\".format(", "records are in the vColumn and it returns the new", "query, title=\"Computing the top{} categories of {}.\".format( k if k", "check_types( [ (\"method\", method, [\"auto\", \"numerical\", \"categorical\", \"cat_stats\"]), (\"max_cardinality\", max_cardinality,", "be able to solve this \" \"issue.\" ) for k", "self.alias, response ) ) if verticapy.options[\"print_info\"]: print(\"The mean encoding was", "Returns ------- float mad See Also -------- vDataFrame.aggregate : Computes", "Matplotlib axes object See Also -------- vDataFrame.plot : Draws the", "_ /~\\ _ |. # |_)\\/ |_)(_|(_|| \\_/|_|(_||| # /", "get_category_from_vertica_type(ctype=ctype) all_cols, max_floor = self.parent.get_columns(), 0 for column in all_cols:", "= argv[2 * i] THEN argv[2 * i + 1]", "input list. 
Parameters ---------- breaks: list List of values used", "xlim custom_lines = [] columns = self.parent[by].distinct() for idx, column", "\"\"\" --------------------------------------------------------------------------- Returns the nth most occurent element. Parameters ----------", "--------------------------------------------------------------------------- Computes the optimal vColumn bar width. Parameters ---------- method:", "# ---# def iloc(self, limit: int = 5, offset: int", "vDataFrame[].tail : Returns the tail of the vColumn. \"\"\" check_types([(\"limit\",", "---# def _repr_html_(self): return self.head(limit=verticapy.options[\"max_rows\"])._repr_html_() # ---# def __setattr__(self, attr,", "the function's performance can drastically decrease. Returns ------- int vColumn", "if isinstance(xlim, (tuple, list)): xlim_tmp = [xlim] else: xlim_tmp =", "\"auto\", \"mode\", \"0ifnull\", \"mean\", \"avg\", \"median\", \"ffill\", \"pad\", \"bfill\", \"backfill\",", "= 'tabular'))\".format( tmp_model_name, i ) for i in range(parameters[\"n_estimators\"]) ]", ") distinct = self.topk(k).values[\"index\"] trans = ( \"(CASE WHEN {}", "compute the optimal h.\", method=\"fetchrow\", ) count, vColumn_min, vColumn_025, vColumn_075,", "---# def str_count(self, pat: str): \"\"\" --------------------------------------------------------------------------- Computes the number", "OVER ({} ORDER BY {}))\".format( \"{}\", \"{}\", partition_by, order_by_ts )", ": average median : median min : minimum mode :", "except Exception as e: self.transformations = [elem for elem in", "comma. Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].apply :", "in order_by] + by) by = self.parent.format_colnames(by) if method ==", "of {}.\".format(self.alias), method=\"fetchall\", ) return [item for sublist in query_result", "= True if not (\"figsize\" in kwargs): kwargs[\"figsize\"] = (14,", "+= [(\"{}\", self.ctype(), self.category())] self.transformations += [(func, ctype, category)] self.parent.__update_catalog__(erase=True,", "+ \", {})\".format(len(distinct_elements)) self.transformations += [(expr, \"int\", \"int\")] self.parent.__update_catalog__(erase=True, columns=[self.alias])", "FROM {} GROUP BY {}\".format( by[0], self.alias, self.alias, self.parent.__genSQL__(), by[0],", "0: nullifzero = 0 cmin, cmax = self.aggregate([\"min\", \"max\"]).values[self.alias] if", "the vColumn with user defined Encoding. vDataFrame[].get_dummies : Encodes the", "Adds a new vColumn to the vDataFrame by using an", "vDataFrame[].decode : Encodes the vColumn using a user-defined encoding. vDataFrame[].discretize", "number cos : trigonometric cosine cosh : hyperbolic cosine cot", "------- vDataFrame self.parent See Also -------- vDataFrame.outliers : Computes the", "multiple statistical aggregations: min, max, median, unique... 
depending on the", "self.apply( func=\"REGEXP_COUNT({}, '{}')\".format(\"{}\", pat.replace(\"'\", \"''\")) ) # ---# def str_extract(self,", "Returns the k most occurent elements and their distributions as", "\"\"\" return self.transformations[-1][2] # ---# def clip(self, lower=None, upper=None): \"\"\"", "of: str = \"\", max_cardinality: int = 6, nbins: int", "self.parent.shape()[0] query = \"SELECT {}{} FROM {}{} OFFSET {} LIMIT", "model.fit(self.parent.__genSQL__(), [self.alias]).plot( ax=ax, **style_kwds ) model.drop() return result except: model.drop()", "nbins=nbins, xlim=xlim_tmp, store=False, ) try: result = model.fit(self.parent.__genSQL__(), [self.alias]).plot( ax=ax,", "\"\"\" --------------------------------------------------------------------------- Computes the Information Value (IV) / Weight Of", "== \"categorical\") ): query = \"\"\"(SELECT {0} || '', COUNT(*)", "IQR / n ** (1 / 3)] sturges : Sturges", "normalize {} using the MIN and the MAX. MAX =", "= self.catalog[elem] self.parent.__update_catalog__(erase=True, columns=[self.alias]) try: if \"count\" in sauv: self.catalog[\"count\"]", "input vColumn. \"\"\" check_types( [ (\"breaks\", breaks, [list]), (\"labels\", labels,", "tmp_model_name, i ) for i in range(parameters[\"n_estimators\"]) ] query =", "median, unique... depending on the input method. Parameters ---------- method:", "binary / spatial / uuid / undefined Returns ------- str", "optional If set to True, the approximate cardinality is returned.", "matplotlib.lines import Line2D colors = gen_colors() if not xlim: xmin", "None) else \"'{}'\".format(str(elem[0]).replace(\"'\", \"''\")) ) result[idx][1] = \"NULL\" if (elem[1]", "False if of: self.parent.are_namecols_in(of) of = self.parent.format_colnames(of) from verticapy.plot import", "value\" ) lower_when = ( \"WHEN {} < {} THEN", "WHEN {} IN ({}) THEN {} || '' ELSE '{}'", "the vColumn. The vColumn will be transformed. Parameters ---------- pat:", "k=nbins, new_category=\"Others\", return_enum_trans=True, )[0].replace(\"{}\", self.alias) query = \"SELECT {} AS", "[int, float]), (\"pie_type\", pie_type, [\"auto\", \"donut\", \"rose\"]), ] ) donut", "\"SELECT {} AS {}, COUNT(*) AS _verticapy_cnt_, 100 * COUNT(*)", "or agreed to in writing, software # distributed under the", "self.alias, \", \".join(by) ), \"STDDEV({}) OVER (PARTITION BY {})\".format( self.alias,", "\"Can not normalize {} using the MIN and the MAX.", "in values: for i in range(len(values[elem])): if isinstance(values[elem][i], decimal.Decimal): values[elem][i]", ") top = None if not (result) else result[0][0] if", "\"\", h: float = 0, max_cardinality: int = 8, cat_priority:", "[ (\"method\", method, [str]), (\"of\", of, [str]), (\"max_cardinality\", max_cardinality, [int,", "( \"WHEN {} > {} THEN {} \".format(\"{}\", upper, upper)", "or equals to 2 in case of discretization using the", "is empty\\nIf you want to normalize by grouping by elements,", "and std). (x - avg) / std robust_zscore : Normalization", "(\"of\", of, [str]), (\"max_cardinality\", max_cardinality, [list]), (\"h\", h, [list, float,", "= [xlim] else: xlim_tmp = [] model = KernelDensity( name,", "the response partitioned by the different vColumn categories. 
Parameters ----------", "x: float A float between 0 and 1 that represents", "'x' will represent the number of seconds, otherwise it will", "UNION ALL (SELECT AVG({}) FROM vdf_table WHERE {} > {})\".format(", "raise TypeError(\"The vColumn must be numerical for Normalization\") return self.parent", "of = self.parent.format_colnames(of) from verticapy.plot import pie return pie( self,", "/~\\ _ |. # |_)\\/ |_)(_|(_|| \\_/|_|(_||| # / #", "\"spearman\": {}, \"spearmand\": {}, \"kendall\": {}, \"cramer\": {}, \"biserial\": {},", ": Computes the number of matches for the regular expression", "# # __ __ ______ ______ __ __ __ __", "if not (\"cmap\" in kwargs): from verticapy.plot import gen_cmap kwargs[\"cmap\"]", "Encoding. vDataFrame[].discretize : Discretizes the vColumn. vDataFrame[].get_dummies : Encodes the", "of a division operation pow : number raised to the", "1\".format( self.alias, y, y, query, ) query = \"SELECT {},", "if of: self.parent.are_namecols_in(of) of = self.parent.format_colnames(of) from verticapy.plot import spider", "check_types([(\"y\", y, [str]), (\"nbins\", nbins, [int])]) self.parent.are_namecols_in(y) y = self.parent.format_colnames(y)", "aad See Also -------- vDataFrame.aggregate : Computes the vDataFrame input", "vDataFrame self.parent See Also -------- vDataFrame[].str_count : Computes the number", "'by' is empty\\nIf you want to normalize by grouping by", "(dropna) else \"\" query = \"SELECT {} AS {}, COUNT(*)", "* i + 1] ... END odd : CASE ...", ") result = executeSQL( query=query, title=\"Computing the optimized histogram nbins", "the vColumn type is date like (date, datetime ...), the", "{} FROM (SELECT {}, COUNT(*) AS _verticapy_cnt_ FROM {}{}GROUP BY", "- {})\".format( self.alias, cmin, \"NULLIFZERO\" if (nullifzero) else \"\", cmax,", "{} missing value{} filled.\".format( total, self.alias, conj, ) ) else:", "min, APPROXIMATE_PERCENTILE ({3}{4} USING PARAMETERS percentile = 0.1) AS 'approx_10%',", "check_types( [ (\"by\", by, [str]), (\"max_cardinality\", max_cardinality, [int, float]), (\"h\",", "the parent vDataFrame. This parameter is used for testing purpose.", "average of the {}'s lower and upper outliers.\".format( self.alias ),", "distinct elements for vColumns 1 and 2 to be used", "\"vColumn {} must be binary to use iv_woe.\".format(y) ) self.parent[y].distinct()", "by {}.\".format( numcol, self.alias ) values = to_tablesample(query, title=title).values elif", "self.isnum(), self.isdate(), ) if (is_date) and not (method == \"categorical\"):", "\"\"\" return self.aggregate([\"sum\"]).values[self.alias][0] # ---# def tail(self, limit: int =", "== \"int\": h = int(max(math.floor(h), 1)) floor_end = -1 if", "the method is in ffill|pad|bfill|backfill then 'order_by' must be a", "permissions and # limitations under the License. # # |_", "= { \"index\": [\"name\", \"dtype\"] + index, \"value\": [self.alias, self.ctype()]", "self.parent._VERTICAPY_VARIABLES_[\"columns\"].remove(self.alias) delattr(self.parent, self.alias) except: self.parent._VERTICAPY_VARIABLES_[\"exclude_columns\"] += [self.alias] if add_history: self.parent.__add_to_history__(", "except: cmax, cmin = ( \"MAX({}) OVER (PARTITION BY {})\".format(", "_ _ |_) # \\/ (/_| | |(_(_|| \\/ #", "mean = avg # ---# def bar( self, method: str", "[] prefix = ( self.alias.replace('\"', \"\") + prefix_sep.replace('\"', \"_\") if", "BY 2 DESC LIMIT {}) VERTICAPY_SUBTABLE ORDER BY split_value::float\".format( \"", "outliers threshold. 
Values lesser than quantile(alpha) or greater than quantile(1-alpha)", ": Discretizes the vColumn. vDataFrame[].get_dummies : Encodes the vColumn with", "is boolean. See Also -------- vDataFrame[].isdate : Returns True if", "self.parent.format_colnames(by) from verticapy.plot import gen_colors from matplotlib.lines import Line2D colors", "---# def clip(self, lower=None, upper=None): \"\"\" --------------------------------------------------------------------------- Clips the vColumn", "\"\"\" return self.aggregate([\"min\"]).values[self.alias][0] # ---# def mode(self, dropna: bool =", "new_name: str): \"\"\" --------------------------------------------------------------------------- Renames the vColumn by dropping the", "empty\" ) assert not (self.parent.is_colname_in(name)), NameError( f\"A vColumn has already", ": Clips the vColumn using as lower bound quantile(alpha) and", "] ) self.parent.are_namecols_in(ts) ts = self.parent.format_colnames(ts) from verticapy.plot import range_curve_vdf", "{} > {} THEN {} \".format(\"{}\", upper, upper) if (isinstance(upper,", "(\"order_by\", order_by, [list]), ] ) method = method.lower() self.parent.are_namecols_in([elem for", "n: int, optional Integer corresponding to the offset. For example,", "equals to 2 in case of discretization using the method", "the vColumn category is date. See Also -------- vDataFrame[].isbool :", "\"\"), func.replace(\"{}\", \"x\"), ) ) else: for k in range(max_floor):", "by: list, optional vColumns used in the partition. return_trans: bool,", "the vColumns to use to sort the data when using", "float, int]), ] ) if by: self.parent.are_namecols_in(by) by = self.parent.format_colnames(by)", "# \\ / / / # \\/ / / #", "\"\"): numcol = self.parent.format_colnames(numcol) assert self.parent[numcol].category() in (\"float\", \"int\"), TypeError(", "otherwise. expr: str, optional SQL expression. by: list, optional vColumns", "Also -------- vDataFrame.memory_usage : Returns the vDataFrame memory usage. \"\"\"", "if method == \"auto\": method = \"mean\" if (self.isnum() and", "elements to skip. Returns ------- tablesample An object containing the", "be picked or computed) nbins: int, optional Number of bins.", "Geospatial object. Parameters ---------- *args / **kwargs Any optional parameter", "0\".format(\"{}\", pat.replace(\"'\", \"''\")) ) # ---# def str_count(self, pat: str):", "Slices the vColumn. The vColumn will be transformed. Parameters ----------", "# ---# def __init__( self, alias: str, transformations: list =", "on an aggregation. \"\"\" if isinstance(pie_type, str): pie_type = pie_type.lower()", "in (\"freedman_diaconis\", \"fd\"): best_h = fd else: best_h = max(sturges,", "is a Python library with scikit-like functionality for conducting #", "for k in range(max_floor): self.transformations += [(\"{}\", self.ctype(), self.category())] self.transformations", "IGNORE NULLS) OVER ({} ORDER BY {}))\".format( \"{}\", \"{}\", partition_by,", "KernelDensity schema = verticapy.options[\"temp_schema\"] if not (schema): schema = \"public\"", "be trained if the response is numerical (except ints and", "Q1. approx: bool, optional If set to True, the approximate", "else: result = ( self.parent.describe( method=\"numerical\", columns=[self.alias], unique=False ) .transpose()", "div(self, x: float): \"\"\" --------------------------------------------------------------------------- Divides the vColumn by the", "optional Number of elements to skip. 
Returns ------- tablesample An", "arguments (example, power or mod), 'x' represents the second argument.", "style_kwds, idx)[\"color\"], lw=4, ), ] ax.set_title(\"KernelDensity\") ax.legend( custom_lines, columns, title=by,", "{} must be binary to use iv_woe.\".format(y) ) response_cat =", ") elif self.isdate(): min_date = self.min() table = \"(SELECT DATEDIFF('second',", "(labels), ParameterError( \"Length of parameter breaks must be equal to", "return self.parent # ---# def nsmallest(self, n: int = 10):", "\"\"\" --------------------------------------------------------------------------- Encodes the vColumn with the One-Hot Encoding algorithm.", "/ TIME ZONE / TIMEZONE_HOUR / TIMEZONE_MINUTE / WEEK /", "self.parent # ---# def median( self, approx: bool = True,", "= \" WHERE {} IS NOT NULL \".format(self.alias) if (dropna)", "BY {} ORDER BY _verticapy_cnt_ DESC {}\".format( bin_spatial_to_str(self.category(), self.alias), self.alias,", "using the 'topk' method. RFmodel_params: dict, optional Dictionary of the", "Combination of Freedman Diaconis and Sturges. freedman_diaconis : Freedman Diaconis", "bars. It is only valid if the vColumns are numerical.", "int / float / text / binary / spatial /", "def max(self): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'max' (Maximum).", "def aad(self): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'aad' (Average", "relevant interval to use for the discretization. topk : Keeps", "to the lower bound itself and the values higher than", ") name = ( name.replace(\" \", \"_\") .replace(\"/\", \"_\") .replace(\",\",", "str = \"\", method: str = \"density\", of: str =", "to {}.\".format( self.alias, dtype ) ) return self.parent except Exception", "the topk most frequent categories and merge the other into", "Robust Z-Score - The MAD is null !\".format( self.alias )", "vDataFrame has been transformed multiple times, so it's better practice", "new_column = \"DECODE({}, NULL, 0, 1)\" elif method in (\"mean\",", "sign : arithmetic sign sin : trigonometric sine sinh :", "method, of, max_cardinality, h, ax=ax, **style_kwds, ) # ---# def", "If empty, an optimized number of nbins will be computed.", "= (6, 6), h: Union[int, float, tuple] = (None, None),", "in sublist] # ---# def div(self, x: float): \"\"\" ---------------------------------------------------------------------------", "pt_events, CASE WHEN non_events = 0 OR events = 0", "(quote_ident(column) in func) or ( re.search( re.compile(\"\\\\b{}\\\\b\".format(column.replace('\"', \"\"))), func )", "on an aggregation. \"\"\" check_types( [ (\"by\", by, [str]), (\"method\",", "axes. ax: Matplotlib axes object, optional The axes to plot", "than quantile(alpha) or greater than quantile(1-alpha) will be dropped. Returns", "-> x^2 + 2 use \"POWER({}, 2) + 2\". copy_name:", "\"date\" # ---# def isin(self, val: list, *args): \"\"\" ---------------------------------------------------------------------------", "The vColumn will be transformed. 
Parameters ---------- field: str The", "] # ---# def range_plot( self, ts: str, q: tuple", "* ZEROIFNULL(LN(pt_non_events / NULLIFZERO(pt_events))) END AS iv FROM ({}) x", "\"{}\", \"{}\", partition_by, order_by_ts ) if method in (\"mean\", \"median\")", "1: pre_comp = self.parent.__get_catalog_value__(self.alias, \"top\") if pre_comp != \"VERTICAPY_NOT_PRECOMPUTED\": if", "spatial / uuid / undefined Returns ------- str vColumn category.", "[str]), (\"max_cardinality\", max_cardinality, [list]), (\"h\", h, [list, float, int]), ]", "\"\"\" --------------------------------------------------------------------------- Draws the pie chart of the vColumn based", "dict, optional Method to use to impute the missing values.", "TS methods. Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].dropna", "by[0], \", \".join( [\"{}, {}\".format(elem[0], elem[1]) for elem in result]", "data transformation operations, and offers beautiful graphical options. # #", "str = \"gaussian\", nbins: int = 200, xlim: tuple =", "sum std : standard deviation topk : kth most occurent", "self.alias, ) query_result = executeSQL( query=query, title=\"Computing the distinct categories", "# ---# def apply_fun(self, func: str, x: float = 2):", "aggregations.\", method=\"fetchall\", ) for idx, elem in enumerate(result): result[idx][0] =", "in range(len(distinct_elements) - n): name = ( '\"{}{}\"'.format(prefix, k) if", "if by: self.parent.are_namecols_in(by) by = self.parent.format_colnames(by) columns = [self.alias, by]", "as Response Column.\".format( self.alias, response ) ) if verticapy.options[\"print_info\"]: print(\"The", "bound itself and the values higher than the upper bound", "= 1 / 1.4826 elif method == \"zscore\": self.catalog[\"mean\"] =", "vDataFrame[].drop_outliers : Drops outliers in the vColumn. vDataFrame.outliers : Adds", "result + [self.max()] elif method == \"topk\": assert k >=", "in (\"mean\", \"avg\", \"median\"): fun = \"MEDIAN\" if (method ==", "to use for the discretization. topk : Keeps the topk", "return ts_plot( self, ts, by, start_date, end_date, area, step, ax=ax,", "vColumn. \"\"\" return self.parent.aggregate(func=func, columns=[self.alias]).transpose() agg = aggregate # ---#", "the Time Series of the vColumn. Parameters ---------- ts: str", "hyperbolic tangent x: int/float, optional If the function has two", "\"WHEN {} > {} THEN {} \".format(\"{}\", upper, upper) if", "from verticapy.plot import gen_colors kwargs[\"color\"] = gen_colors()[0] if not (\"legend\"", "elem[1] != None ] ), ) cmax = \"DECODE({}, {},", "threshold, [int, float]), ] ) if use_threshold: result = self.aggregate(func=[\"std\",", "simply because of ambiguous columns naming.\\nBy changing one of \"", "self.ctype(), self.category())] self.transformations += [ (\"AVG({}) OVER (PARTITION BY {})\".format(response,", "self.alias, \", \".join(by) ), \"MIN({}) OVER (PARTITION BY {})\".format( self.alias,", "self.parent.shape()[0], numcol, cast, ) tmp_query += ( \" WHERE {}", "the vColumn using 'aad' (Average Absolute Deviation). Returns ------- float", ": Applies functions to the input vColumns. vDataFrame.applymap : Applies", "the vColumns. vDataFrame.eval : Evaluates a customized expression. \"\"\" if", "def avg(self): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'avg' (Average).", "the most relevant interval to use for the discretization. topk", "the vColumns outliers using the input method. 
Parameters ---------- method:", "object \"\"\" columns = [self.alias] check = True if len(args)", "(\"h\", h, [list, float, int]), ] ) if by: self.parent.are_namecols_in(by)", "self.isdate(), ParameterError( \"numh is only available on type numeric|date\" )", "\"[Rename]: The vColumn {} was renamed '{}'.\".format(old_name, new_name) ) return", "* sauv[\"mad\"] ) elif method == \"zscore\": self.catalog[elem] = (sauv[elem]", "{} THEN {} ELSE {} END)\".format( \"{}\", p_alpha, mean_alpha, \"{}\",", "!= \"VERTICAPY_NOT_PRECOMPUTED\": return pre_comp store_usage = executeSQL( \"SELECT ZEROIFNULL(SUM(LENGTH({}::varchar))) FROM", "bound. upper: float, optional Upper bound. Returns ------- vDataFrame self.parent", "Lower bound. upper: float, optional Upper bound. Returns ------- vDataFrame", "self.transformations += [(\"{}\", self.ctype(), self.category())] self.transformations += [ (\"AVG({}) OVER", "+= [name] all_new_features += [name] conj = \"s were \"", "Exception as e: raise ConversionError( \"{}\\nThe vColumn {} can not", "cardinality (or approximate cardinality). See Also -------- vDataFrame.aggregate : Computes", "product(self): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'product'. Returns -------", "mean encoding of a response. \"\"\" check_types( [ (\"RFmodel_params\", RFmodel_params,", "of parameter 'breaks' must be greater or equal to 2.\"", "Aggregates the vColumn using 'skewness'. Returns ------- float skewness See", "[str]), (\"by\", by, [list]), (\"order_by\", order_by, [list]), ] ) method", "largest vColumn elements. Parameters ---------- n: int, optional Offset. Returns", "\"{}\", p_1_alpha, mean_1_alpha, \"{}\" ) ) return self.parent # ---#", "column = args[0] elif \"column\" in kwargs: column = kwargs[\"column\"]", "[elem for elem in copy_trans] for elem in sauv: self.catalog[elem]", "is boolean. vDataFrame[].isnum : Returns True if the vColumn is", "self.parent.are_namecols_in(y) y = self.parent.format_colnames(y) assert self.parent[y].nunique() == 2, TypeError( \"vColumn", "using the average of the response partitioned by the different", "), ) else: cmax, cmin = ( \"MAX({}) OVER (PARTITION", "Encoding. vDataFrame[].get_dummies : Encodes the vColumn with One-Hot Encoding. vDataFrame[].mean_encode", "for idx in range(len(breaks) - 1): first_elem, second_elem = breaks[idx],", "on a specific vColumn. \"\"\" return self.parent.aggregate(func=func, columns=[self.alias]).transpose() agg =", "import Union # VerticaPy Modules import verticapy from verticapy.utilities import", "vDataFrame.add_copy : Creates a copy of the vColumn. \"\"\" check_types([(\"new_name\",", "\"cat_stats\"]), (\"max_cardinality\", max_cardinality, [int, float]), (\"numcol\", numcol, [str]), ] )", "otherwise. Example: Write {\"n_estimators\": 20, \"max_depth\": 10} to train a", "by: str = \"\", method: str = \"density\", of: str", "columns=[self.alias]) self.parent.__add_to_history__( \"[Apply]: The vColumn '{}' was transformed with the", ") ) return self.parent # ---# def mad(self): \"\"\" ---------------------------------------------------------------------------", "Also -------- vDataFrame[].dropna : Drops the vColumn missing values. \"\"\"", "regular expression matches in each of the vColumn records by", "vDataFrame.aggregate : Computes the vDataFrame input aggregations. \"\"\" return self.aggregate([\"kurtosis\"]).values[self.alias][0]", "Computes the vDataFrame input aggregations. 
\"\"\" return self.aggregate([\"count\"]).values[self.alias][0] # ---#", "record will be sliced using the floor of the slicing", "check_types([(\"lower\", lower, [float, int]), (\"upper\", upper, [float, int])]) assert (lower", "---# def ctype(self): \"\"\" --------------------------------------------------------------------------- Returns the vColumn DB type.", "--------------------------------------------------------------------------- Looks if some specific records are in the vColumn", "bins will be computed. h: float, optional Interval width of", "# /\\ \\ / / /\\ ___\\ /\\ __ \\", "transformed with the func 'x -> {}'.\".format( copy_name.replace('\"', \"\"), func.replace(\"{}\",", "# # # ---# class vColumn(str_sql): \"\"\" --------------------------------------------------------------------------- Python object", "Chart of the input vColumns based on an aggregation. \"\"\"", "FROM vdf_table GROUP BY {0} ORDER BY COUNT(*) DESC LIMIT", "func=\"TIME_SLICE({}, {}, '{}', '{}')\".format( \"{}\", length, unit.upper(), start_or_end ) )", "Fills the vColumns outliers using the input method. Parameters ----------", "\" was \" self.parent.__add_to_history__( \"[Get Dummies]: One hot encoder was", "(\"cmap\" in kwargs): from verticapy.plot import gen_cmap kwargs[\"cmap\"] = gen_cmap()[0]", "trees and a maximum depth of 10. response: str, optional", "Random Forest on a response column to find the most", "method=\"fetchall\", ) return [item for sublist in query_result for item", "warnings.warn(warning_message, Warning) else: distinct_elements = self.distinct() expr = [\"DECODE({}\"] text_info", "IS NOT NULL ORDER BY {} DESC LIMIT {}\".format( self.parent.__genSQL__(),", "self.parent.__get_catalog_value__(self.alias, \"top\") if pre_comp != \"VERTICAPY_NOT_PRECOMPUTED\": if not (dropna) and", "executeSQL( query=query, title=\"Computing the optimized histogram nbins using Random Forest.\",", "expression in each record of the vColumn. vDataFrame[].str_replace : Replaces", "an optimized number of nbins will be computed. h: float,", "Also -------- vDataFrame.iv_woe : Computes the Information Value (IV) Table.", "was transformed with the func 'x -> {}'.\".format( self.alias.replace('\"', \"\"),", "= 5): \"\"\" --------------------------------------------------------------------------- Returns the head of the vColumn.", "will be picked or computed) nbins: int, optional Number of", "histogram bins.\", method=\"fetchall\", ) result = [elem[0] for elem in", "information, see utilities.tablesample. See Also -------- vDataFrame[].tail : Returns the", "self.alias, method ) ) else: raise TypeError(\"The vColumn must be", "(1 / 3)] sturges : Sturges [CEIL(log2(n)) + 1] Returns", "\"\"\" self.parent.filter(\"{} IS NOT NULL\".format(self.alias)) return self.parent # ---# def", "False, ): \"\"\" --------------------------------------------------------------------------- Discretizes the vColumn using the input", "return self.apply(func=\"DATE_PART('{}', {})\".format(field, \"{}\")) # ---# def decode(self, *argv): \"\"\"", "sauv[elem] elif elem == None: self.catalog[elem] = None elif method", "it will represent a number. Returns ------- vDataFrame self.parent See", "like. Optimized h will be computed if the parameter is", "will be drawn. ax: Matplotlib axes object, optional The axes", "an input 'quantile'. Parameters ---------- x: float A float between", "range(max_floor): self.transformations += [(\"{}\", self.ctype(), self.category())] self.transformations += final_transformation sauv", "partition. 
order_by: list, optional List of the vColumns to use", "str(distinct_elements[k]).replace('\"', \"_\") ) ) assert not (self.parent.is_colname_in(name)), NameError( f\"A vColumn", "* mad) minmax : Normalization using the MinMax (min and", "e, func.replace(\"{}\", \"x\"), self.alias.replace('\"', \"\") ) ) # ---# def", "search. Parameters ---------- val: list List of the different records.", "self.parent.__genSQL__() ), print_time_sql=False, ) except: new_column = \"COALESCE({}, {}({}) OVER", "the mean encoding of a response. \"\"\" check_types( [ (\"RFmodel_params\",", "\"\"\" --------------------------------------------------------------------------- Draws the range plot of the vColumn. The", "val.replace(\"'\", \"''\") if val != None: new_column = \"COALESCE({}, '{}')\".format(\"{}\",", "percentile = 0.75) AS Q3, MAX({}) AS max FROM {}\".format(", "+= [lp + tmp_query + rp] query = \"WITH vdf_table", "---# def std(self): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'std'", "The vColumn will be transformed. Parameters ---------- start: int Start", "[ (\"RFmodel_params\", RFmodel_params, [dict]), (\"return_enum_trans\", return_enum_trans, [bool]), (\"h\", h, [int,", ": Evaluates a customized expression. \"\"\" if isinstance(func, str_sql): func", "), (\"plot_median\", plot_median, [bool]), ] ) self.parent.are_namecols_in(ts) ts = self.parent.format_colnames(ts)", "+= [ (\"{}\", self.ctype(), self.category()) ] self.parent[copy_name].transformations += [(func, ctype,", "1 - alpha], [self.alias]) .transpose() .values[self.alias] ) self.parent.filter( \"({} BETWEEN", "else: self.transformations += [trans] sauv = {} for elem in", "of, [str]), (\"max_cardinality\", max_cardinality, [int, float]), (\"h\", h, [int, float]),", "stddev, self.parent.__genSQL__() ), print_time_sql=False, ) except: avg, stddev = (", "self.parent.__update_catalog__(erase=True, columns=[self.alias]) self.parent.__add_to_history__( \"[Mean Encode]: The vColumn {} was transformed", "vColumn_025) / (count) ** (1.0 / 3.0), 1e-99) if method.lower()", "part of the DB version you are using. Returns -------", "average See Also -------- vDataFrame.aggregate : Computes the vDataFrame input", "aggregations. \"\"\" return self.aggregate([\"max\"]).values[self.alias][0] # ---# def mean_encode(self, response: str):", "vDataFrame self.parent See Also -------- vDataFrame.eval : Evaluates a customized", "\"\"\" check_types([(\"add_history\", add_history, [bool])]) try: parent = self.parent force_columns =", "\"mean\", \"std\", \"min\", \"approx_25%\", \"approx_50%\", \"approx_75%\", \"max\", ] if method", "Also -------- vDataFrame[].str_count : Computes the number of matches for", "NULL\".format(self.alias) if (dropna) else \"\" query = \"SELECT {} AS", "DB type. Returns ------- str vColumn DB type. \"\"\" return", "self.parent.format_colnames(by) columns = [self.alias, by] else: columns = [self.alias] if", "minimum See Also -------- vDataFrame.aggregate : Computes the vDataFrame input", "a maximum depth of 10. 
response: str, optional Response vColumn", "model.set_params(RFmodel_params) parameters = model.get_params() try: model.fit(tmp_view_name, [self.alias], response) query =", "dtype : vColumn type iqr : interquartile range kurtosis :", "= self.category(), self.ctype() copy_trans = [elem for elem in self.transformations]", "conj)) self.parent.__add_to_history__( \"[Fillna]: {} {} missing value{} filled.\".format( total, self.alias,", "_verticapy_cnt_ FROM {}{}GROUP BY {} ORDER BY _verticapy_cnt_ DESC LIMIT", "data science life cycle, uses a ‘pipeline’ mechanism to sequentialize", "[self.alias, self.ctype()] + result, } if ((is_date) and not (method", "to the input vColumn. \"\"\" check_types([(\"x\", x, [int, float])]) assert", "import ( RandomForestClassifier, RandomForestRegressor, ) drop(tmp_model_name, method=\"model\") if self.parent[response].category() ==", "= 4.0, use_threshold: bool = True, alpha: float = 0.05", "self.parent See Also -------- vDataFrame.eval : Evaluates a customized expression.", "must be composed of two flower brackets {}. For example", "\"int\": h = int(max(math.floor(h), 1)) floor_end = -1 if (self.category()", "= to_tablesample(query, title=title).values elif ( ((distinct_count < max_cardinality + 1)", "computed. nbins: int, optional Number of bins used for the", "`\\ / / # | \\/ / / / #", "options. # # VerticaPy aims to do all of the", "range(len(result)): if result[i][2] == None: pass elif math.isnan(result[i][2]): result[i][2] =", "vColumn element.\", method=\"fetchfirstelem\", ) else: return getattr(self, index) # ---#", "True): \"\"\" --------------------------------------------------------------------------- Returns the k most occurent elements and", "two arguments (example, power or mod), 'x' represents the second", "Union[str, datetime.datetime, datetime.date] = \"\", end_date: Union[str, datetime.datetime, datetime.date] =", "will memorize the already computed aggregations to gain in performance.", "and not (method == \"categorical\"): result = self.aggregate([\"count\", \"min\", \"max\"])", "outliers in the vColumn. vDataFrame.outliers : Adds a new vColumn", "by the different vColumn categories. Parameters ---------- response: str Response", "{} WHEN {} > {} THEN {} ELSE {} END)\".format(", "(\"kernel\", kernel, [\"gaussian\", \"logistic\", \"sigmoid\", \"silverman\"]), (\"bandwidth\", bandwidth, [int, float]),", "return self.quantile(0.5, approx=approx) # ---# def memory_usage(self): \"\"\" --------------------------------------------------------------------------- Returns", "the min sem : standard error of the mean skewness", "head of the vColumn. \"\"\" return self.iloc(limit=limit, offset=-1) # ---#", "val = {self.alias: val} return self.parent.isin(val) # ---# def isnum(self):", ": minimum mode : most occurent element percent : percent", "has two arguments (example, power or mod), 'x' represents the", "utilities.tablesample. See Also -------- vDataFrame[].tail : Returns the a part", "Equal Frequency discretization\" ) total, query, nth_elems = nb, [],", "= \"DECODE({}, {}, NULL)\".format( by[0], \", \".join( [ \"{}, {}\".format(", "Also -------- vDataFrame[].nsmallest : Returns the n smallest elements in", "elem == None: self.catalog[elem] = None elif method == \"robust_zscore\":", "1.01 / nbins if h > 0.01: h = round(h,", "vColumn will be considered as categorical. 
numcol: str, optional Numerical", "try: parent = self.parent force_columns = [ column for column", "range(len(distinct_elements)): expr += [ \"'{}', {}\".format(str(distinct_elements[k]).replace(\"'\", \"''\"), k) ] text_info", "when using TS methods. Returns ------- vDataFrame self.parent See Also", "bandwidth=bandwidth, kernel=kernel, nbins=nbins, xlim=xlim_tmp, store=False, ) try: result = model.fit(self.parent.__genSQL__(),", "= executeSQL( query=query, title=\"Computing the optimized histogram nbins using Random", "else: return getattr(self, index) # ---# def __len__(self): return int(self.count())", "of the vColumn. Parameters ---------- by: str, optional vColumn to", "2) elif h > 0.0001: h = round(h, 4) elif", "= ( \"WHEN {} < {} THEN {} \".format(\"{}\", lower,", "boxplot return boxplot(self, by, h, max_cardinality, cat_priority, ax=ax, **style_kwds) #", "occurent elements, how often they occur, and other statistical information.", "{} END)\".format( \"{}\", p_alpha, mean_alpha, \"{}\", p_1_alpha, mean_1_alpha, \"{}\" )", "self.transformations += [ ( \"{}::{}\".format(\"{}\", dtype), dtype, get_category_from_vertica_type(ctype=dtype), ) ]", "== \"int\": best_h = max(math.floor(best_h), 1) return best_h # ---#", "mad *= 1.4826 if mad != 0: if return_trans: return", "DESC LIMIT {}) VERTICAPY_SUBTABLE ORDER BY _verticapy_cnt_ ASC LIMIT 1\".format(", "drop_first, [bool]), (\"use_numbers_as_suffix\", use_numbers_as_suffix, [bool]), ] ) distinct_elements = self.distinct()", "p_1_alpha, \"{}\" ) ) elif method == \"mean\": query =", "if the vColumn category is date. vDataFrame[].isnum : Returns True", ") method = method.lower() self.parent.are_namecols_in([elem for elem in order_by] +", "# ---# def geo_plot(self, *args, **kwargs): \"\"\" --------------------------------------------------------------------------- Draws the", "self.alias, p_1_alpha, ) mean_alpha, mean_1_alpha = [ item[0] for item", "/ DOY / EPOCH / HOUR / ISODOW / ISOWEEK", "vDataFrame.aggregate : Computes the vDataFrame input aggregations. \"\"\" check_types([(\"x\", x,", "self.parent See Also -------- vDataFrame[].str_count : Computes the number of", ") self.parent.are_namecols_in(ts) ts = self.parent.format_colnames(ts) from verticapy.plot import range_curve_vdf return", "if method == \"winsorize\": self.clip(lower=p_alpha, upper=p_1_alpha) elif method == \"null\":", "FROM ({}) x\".format( self.alias, query, ) query = \"SELECT {}", "the slicing instead of the ceiling. Returns ------- vDataFrame self.parent", "ALL \".join(query) ) title = \"Describes the statics of {}", "a specific TS field from the vColumn. \"\"\" check_types( [", "False, ): \"\"\" --------------------------------------------------------------------------- Encodes the vColumn with the One-Hot", "self.parent._VERTICAPY_VARIABLES_[\"columns\"] ] force_columns.remove(self.alias) executeSQL( \"SELECT * FROM {} LIMIT 10\".format(", "\"{}\", str(distinct_elements[k]).replace(\"'\", \"''\") ) transformations = self.transformations + [(expr, \"bool\",", "mechanism to sequentialize # data transformation operations, and offers beautiful", "\"Others\", RFmodel_params: dict = {}, response: str = \"\", return_enum_trans:", "--------------------------------------------------------------------------- Divides the vColumn by the input element. 
Parameters ----------", "0.0001: h = round(h, 4) elif h > 0.000001: h", "ISODOW / ISOWEEK / ISOYEAR / MICROSECONDS / MILLENNIUM /", ") func = \"(CASE {}{}ELSE {} END)\".format(lower_when, upper_when, \"{}\") self.apply(func=func)", "return self.parent[columns].to_geopandas(self.alias).plot(*args, **kwargs) # ---# def get_dummies( self, prefix: str", "False, ax=None, **style_kwds, ): \"\"\" --------------------------------------------------------------------------- Draws the range plot", "= ( \"AVG({}) OVER (PARTITION BY {})\".format( self.alias, \", \".join(by)", "(\"offset\", offset, [int, float])]) if offset < 0: offset =", "\"<\", \"<=\", \"]\", \"]\" else: op1, op2, close_l, close_r =", "quantile(alpha) or greater than quantile(1-alpha) will be dropped. Returns -------", "+= [trans] sauv = {} for elem in self.catalog: sauv[elem]", "|(_(_|| \\/ # / # VerticaPy is a Python library", "square root tan : trigonometric tangent tanh : hyperbolic tangent", ", 'categorical' otherwise. categorical : Uses only categorical aggregations during", "NOT NULL GROUP BY 1 ORDER BY 2 DESC LIMIT", "if it is used to compute other vColumns. Parameters ----------", "to '{}'\".format( e, func.replace(\"{}\", \"x\"), self.alias.replace('\"', \"\") ) ) #", "previously the method on the vColumn \" \"or simply because", "of the vColumn. One vDataFrame can have multiple children vColumns", "Unless required by applicable law or agreed to in writing,", "vColumns are numerical. Optimized h will be computed if the", "the vColumn descriptive statistics. \"\"\" check_types([(\"k\", k, [int, float]), (\"dropna\",", "of the dummies ({name}).\\n\" \"It can be the result of", "AS {} FROM {}\".format( trans, self.alias, self.alias, y, y, self.parent.__genSQL__(),", "p_alpha, p_1_alpha = executeSQL( query=query, title=\"Computing the quantiles of {}.\".format(self.alias),", "of pre-computed aggregations. parent, vDataFrame : Parent of the vColumn.", "the vDataFrame.\".format(self.alias) ) return parent # ---# def drop_outliers( self,", "aggregations. \"\"\" check_types([(\"approx\", approx, [bool])]) if approx: return self.aggregate(func=[\"approx_unique\"]).values[self.alias][0] else:", "element (Constant Interpolation). ffill : Propagation of the first element", "= \"\", start_date: Union[str, datetime.datetime, datetime.date] = \"\", end_date: Union[str,", "/ # VerticaPy is a Python library with scikit-like functionality", "check_types([(\"approx\", approx, [bool])]) if approx: return self.aggregate(func=[\"approx_unique\"]).values[self.alias][0] else: return self.aggregate(func=[\"unique\"]).values[self.alias][0]", "Badr and Fouad are in the vColumn. You can write", "-------- vDataFrame.expected_store_usage : Returns the vDataFrame expected store usage. \"\"\"", "((distinct_count < max_cardinality + 1) and (method != \"numerical\")) or", "input aggregations. \"\"\" return self.aggregate([\"aad\"]).values[self.alias][0] # ---# def abs(self): \"\"\"", "with One-Hot Encoding. \"\"\" check_types([(\"response\", response, [str])]) self.parent.are_namecols_in(response) response =", "'x -> {}'.\".format( self.alias.replace('\"', \"\"), func.replace(\"{}\", \"x\"), ) ) return", "If set to True, the method will return the transformation", "odd : CASE ... WHEN vColumn = argv[2 * i]", "set to True, the record will be sliced using the", "if limit <= 0: limit = 0 limit = \"", "to transform the vColumn. 
Special and utility methods
---------------------------
__repr__ / _repr_html_ : display the first verticapy.options["max_rows"]
    records of the vColumn (self.head(limit=verticapy.options["max_rows"])).
__len__              : returns int(self.count()).
__getitem__          : accepts an integer index or a slice with a step of 1 or
    None; slices are translated into OFFSET / LIMIT clauses on the parent
    relation and negative bounds are counted from the end of the relation.
iloc / head / tail   : read a chunk of the vColumn; head reads from the top,
    tail uses a negative offset, and iloc accepts both a limit and an offset.
add_copy(name)       : adds a copy vColumn to the parent vDataFrame.
rename(new_name)     : renames the vColumn; it fails when a vColumn with the
    alias new_name already exists in the parent vDataFrame.
isin(val)            : looks whether specific records are in the vColumn and
    returns the vDataFrame of the search.
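The slice-to-SQL translation can be illustrated in isolation. A hedged,
standalone sketch (the function name and the exact clause formatting are
illustrative, not the library's internals):

    def slice_to_offset_limit(index, relation_size):
        # Translate a Python slice (step 1 only) into OFFSET / LIMIT values;
        # negative bounds are counted from the end of the relation.
        if index.step not in (1, None):
            raise ValueError("slicing with a step different from 1 is not allowed")
        start = 0 if index.start is None else index.start
        stop = relation_size if index.stop is None else index.stop
        if start < 0:
            start += relation_size
        if stop < 0:
            stop += relation_size
        limit = max(stop - start, 0)
        return "OFFSET {} LIMIT {}".format(start, limit)

    print(slice_to_offset_limit(slice(-10, None), 1000))  # OFFSET 990 LIMIT 10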
Aggregations
------------
aggregate(func)     : computes a list of aggregations on the vColumn and
    returns a tablesample; most of the methods below are thin wrappers
    around it.
aad, avg / mean, count, kurtosis, mad, max, median, min, prod, sem, skewness,
std / stddev, sum, var : usual statistical aggregations of the vColumn.
nunique(approx)     : cardinality of the vColumn; the 'approx_unique'
    aggregation is used when approx is True, the exact 'unique' aggregation
    otherwise.
quantile(x, approx) : x-quantile of the vColumn. When approx is True, the
    approximate quantile is returned; by setting this parameter to False, the
    exact quantile is computed, but the function's performance can drastically
    decrease.
mode(dropna, n)     : returns the nth most occurring element; when dropna is
    True, missing values are not considered during the computation.
topk(k, dropna)     : returns the k most occurring elements and their
    distribution as percents.
value_counts(k)     : returns the k most occurring elements, how often they
    occur, and other statistical information.
distinct()          : returns the distinct categories of the vColumn.
nlargest(n) / nsmallest(n) : read the n largest / smallest elements.
store_usage() / memory_usage() : expected store usage in the database and
    in-memory size of the vColumn object.
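The exact / approximate quantile switch boils down to which SQL aggregate is
generated. A minimal sketch of such a query builder (the function name,
formatting, and sample relation are illustrative only):

    def quantile_query(column, relation, x, approx=True):
        # Build the SQL used to fetch a quantile of a column; based on the
        # PERCENTILE_CONT / APPROXIMATE_PERCENTILE calls quoted above.
        if approx:
            agg = ("APPROXIMATE_PERCENTILE({} USING PARAMETERS "
                   "percentile = {})").format(column, x)
        else:
            agg = ("PERCENTILE_CONT({}) WITHIN GROUP (ORDER BY {}) "
                   "OVER ()").format(x, column)
        return "SELECT {} FROM {} LIMIT 1".format(agg, relation)

    print(quantile_query('"age"', "public.titanic", 0.75))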
Missing values
--------------
fillna(val, method, expr, by, order_by) : fills missing elements in the
    vColumn.
    auto          : 'mean' for numerical vColumns, 'mode' otherwise.
    mean / median : average / median of the vColumn, optionally partitioned by
        the vColumns listed in 'by'.
    mode          : most occurring element of the vColumn.
    0ifnull       : 0 when the vColumn is null, 1 otherwise.
    ffill / pad, bfill / backfill : propagation of the previous or the next
        non-missing element; 'order_by' must then be a list of vColumns used
        to order the data.
    expr          : a custom SQL expression used to impute the value.
dropna()          : filters the records where the vColumn is missing.
drop(add_history) : drops the vColumn from the parent vDataFrame; when
    add_history is True, the operation is stored in the vDataFrame history.
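The 'ffill' / 'bfill' strategies are simple propagation rules. A pure-Python
sketch of their behaviour (illustrative only; the library performs this with
SQL window functions on the relation):

    def fill_gaps(values, method="ffill"):
        # Propagate the previous ('ffill'/'pad') or next ('bfill'/'backfill')
        # non-missing element over a sequence.
        seq = values if method in ("ffill", "pad") else list(reversed(values))
        filled, last = [], None
        for v in seq:
            if v is not None:
                last = v
            filled.append(last if v is None else v)
        return filled if method in ("ffill", "pad") else list(reversed(filled))

    print(fill_gaps([None, 3, None, None, 7]))           # [None, 3, 3, 3, 7]
    print(fill_gaps([None, 3, None, None, 7], "bfill"))  # [3, 3, 7, 7, 7]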
Outliers and normalization
--------------------------
outliers(threshold) : adds a new vColumn labeled with 0 and 1 (1 meaning
    global outlier). The Gaussian distribution is used to identify outliers:
    after normalizing the data (Z-Score), a record whose absolute value is
    greater than the threshold is considered an outlier.
drop_outliers(threshold, use_threshold, alpha) : drops outliers in the
    vColumn. When 'use_threshold' is True, the Z-Score rule above is applied;
    otherwise the records lower than quantile(alpha) or greater than
    quantile(1 - alpha) are dropped.
fill_outliers(method, threshold, use_threshold, alpha) : fills the outliers.
    winsorize : clips the vColumn using quantile(alpha) as lower bound and
        quantile(1 - alpha) as upper bound.
    mean      : replaces the upper and lower outliers by their respective
        average.
    null      : replaces the outliers by NULL.
clip(lower, upper) : clips the vColumn with the input lower and upper bounds;
    at least one of them must have a numerical value.
normalize(method, by, return_trans) : normalizes the vColumn.
    zscore        : (x - mean) / std, optionally partitioned by 'by'.
    robust_zscore : (x - median) / (1.4826 * mad); the 'by' option is only
        available for zscore and minmax.
    minmax        : (x - min) / (max - min).
    When return_trans is True, the transformation is returned instead of the
    parent vDataFrame, which is useful for testing.
Encoding
--------
decode(*argv) : encodes the vColumn with a user-defined encoding; the
    arguments are read pairwise (value, replacement) with an optional final
    default, similar to a SQL DECODE.
get_dummies(prefix, drop_first, use_numbers_as_suffix) : encodes the vColumn
    with One-Hot Encoding. One boolean vColumn is created per distinct
    category; 'drop_first' drops the first dummy to avoid the creation of
    correlated features, and 'use_numbers_as_suffix' uses numbers instead of
    the category names as suffixes.
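One-Hot Encoding reduces to one DECODE expression per kept category. A hedged
sketch of that expression generation (helper name, quoting scheme, and sample
column are illustrative):

    def one_hot_expressions(column, categories, prefix="", drop_first=True):
        # One boolean DECODE expression per kept category.
        kept = categories[1:] if drop_first else categories
        exprs = {}
        for cat in kept:
            name = '"{}{}"'.format(prefix, str(cat).replace('"', "_"))
            exprs[name] = "DECODE({}, '{}', 1, 0)".format(
                column, str(cat).replace("'", "''")
            )
        return exprs

    for name, expr in one_hot_expressions('"embarked"', ["C", "Q", "S"]).items():
        print(name, "=", expr)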
\"\"\" return self.transformations[-1][1].lower() dtype", "upper=p_1_alpha) elif method == \"null\": self.apply( func=\"(CASE WHEN ({} BETWEEN", "Number of elements to display. Returns ------- tablesample An object", "(response = {}).\".format(self.alias, y) result = to_tablesample(query, title=title) result.values[\"index\"] +=", "BY {}\".format( by[0], self.alias, self.alias, self.parent.__genSQL__(), by[0], ), title=\"Computing the", "range(len(values[elem])): if isinstance(values[elem][i], decimal.Decimal): values[elem][i] = float(values[elem][i]) return tablesample(values) #", "sauv[\"mean\"]) / sauv[ \"std\" ] elif method == \"minmax\": self.catalog[elem]", "self.apply( func=\"REGEXP_REPLACE({}, '{}', '{}')\".format( \"{}\", to_replace.replace(\"'\", \"''\"), value.replace(\"'\", \"''\") )", "{1}) VERTICAPY_SUBTABLE) ORDER BY count DESC\" ).format(self.alias, max_cardinality + 1)", "vColumn category. See Also -------- vDataFrame[].ctype : Returns the vColumn", "upper bound itself. Parameters ---------- lower: float, optional Lower bound.", "'aad' (Average Absolute Deviation). Returns ------- float aad See Also", "Computes the Information Value (IV) Table. \"\"\" check_types([(\"y\", y, [str]),", "in [\"date\", \"float\"]: warning_message = ( \"label_encode is only available", "optional If set to True, the approximate median is returned.", "by elements, please use a method in zscore|minmax\" warnings.warn(warning_message, Warning)", "Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].str_count : Computes", "\"median\"): fun = \"MEDIAN\" if (method == \"median\") else \"AVG\"", "[str]), (\"start\", start, [bool]), ] ) start_or_end = \"START\" if", "cmin, self.parent.__genSQL__() ), print_time_sql=False, ) except: cmax, cmin = (", "optional Tuple including the 2 quantiles used to draw the", "method: str = \"auto\"): \"\"\" --------------------------------------------------------------------------- Computes the optimal vColumn", "(\"labels\", labels, [list]), (\"include_lowest\", include_lowest, [bool]), (\"right\", right, [bool]), ]", "if verticapy.options[\"print_info\"]: print(\"Nothing was filled.\") self.transformations = [elem for elem", "vDataFrame input aggregations. \"\"\" return self.quantile(0.5, approx=approx) # ---# def", "elem in result if elem[2] != None ] ), )", "Slices the vColumn using a time series rule. \"\"\" return", "= ( self.nunique(), self.isnum(), self.isdate(), ) if (is_date) and not", "\".format( \"{}\", result[i - 1], result[i], result[i - 1], result[i]", "method=\"same_width\" if self.isnum() else \"topk\", nbins=nbins, k=nbins, new_category=\"Others\", return_enum_trans=True, )[0].replace(\"{}\",", "item in result], \"count\": [int(item[1]) for item in result], \"percent\":", "\"{}\")) else: return self.apply(func=\"{} - ({})\".format(\"{}\", x)) # ---# def", "of the following: CENTURY / DAY / DECADE / DOQ", "= self.parent.format_colnames(by) from verticapy.plot import boxplot return boxplot(self, by, h,", "elem in result] except: drop(tmp_view_name, method=\"view\") drop(tmp_model_name, method=\"model\") raise drop(tmp_view_name,", "vDataFrame self.parent See Also -------- vDataFrame.apply : Applies functions to", "pie( self, method: str = \"density\", of: str = \"\",", "when the parent vDataFrame is modified. 
iv_woe(y, nbins) : computes the Information Value (IV) / Weight of Evidence
    (WOE) table of the vColumn with respect to a binary response column 'y'.
    It tells the predictive power of an independent variable in relation to
    the dependent variable; numerical vColumns are first discretized into
    'nbins' bins, and categories where either the events or the non-events
    count is 0 get a WOE of 0.
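The WOE / IV arithmetic itself is small enough to show in pure Python. A
sketch under the convention used above, WOE = ln(pct_non_events / pct_events)
per category and IV = sum((pct_non_events - pct_events) * WOE); names and
sample counts are illustrative:

    import math

    def woe_iv(categories, events, non_events):
        tot_e, tot_ne = sum(events), sum(non_events)
        table, iv = [], 0.0
        for cat, e, ne in zip(categories, events, non_events):
            if e == 0 or ne == 0:
                woe = 0.0
            else:
                woe = math.log((ne / tot_ne) / (e / tot_e))
            iv += (ne / tot_ne - e / tot_e) * woe
            table.append((cat, woe))
        return table, iv

    table, iv = woe_iv(["A", "B", "C"], events=[30, 10, 5], non_events=[20, 40, 60])
    print(table, round(iv, 3))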
Discretization
--------------
discretize(method, h, nbins, k, new_category, RFmodel_params, response,
return_enum_trans) : discretizes the vColumn using the input method.
    auto       : 'same_width' for numerical vColumns, 'topk' otherwise.
    same_width : bins of width 'h'; when h is 0, an optimized width is
        computed with numh().
    same_freq  : 'nbins' bins holding approximately the same number of
        records ('nbins' must be greater than 1).
    topk       : keeps the k most occurring categories and gathers the less
        frequent elements into one new category ('Others' by default).
    smart      : uses the split values of a Random Forest trained on the
        'response' column; 'RFmodel_params' holds the model parameters, for
        example {"n_estimators": 20, "max_depth": 10} to train a Random Forest
        with 20 trees of maximum depth 10. A Regressor is trained when the
        response is numerical (except ints and bools), a Classifier otherwise.
    When return_enum_trans is True, the method returns the transformation
    instead of applying it; this is useful for testing and for looking at the
    final transformation.
cut(breaks, labels, include_lowest, right) : discretizes the vColumn using a
    list of cut points; it only works on numerical or date-like vColumns.
    'breaks' must contain at least two values and 'labels', if provided, must
    have exactly one element fewer than 'breaks', otherwise names are
    generated from the intervals. When 'right' is True, the intervals are
    closed on the right, and 'include_lowest' also includes the lower bound of
    the first interval.
numh(method) : computes the optimal vColumn bar width / interval size; only
    available when the vColumn is numerical or of type date.
    freedman_diaconis : bin width of 2 * IQR / n ** (1 / 3).
    sturges           : number of bins given by CEIL(log2(n)) + 1.
    auto              : combination of the two rules above.
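A plain-Python approximation of the two bin-width rules referenced above
(function name and the way 'auto' combines them are illustrative):

    import math

    def optimal_width(values, method="auto"):
        # Freedman-Diaconis and Sturges rules for the histogram bar width.
        n = len(values)
        ordered = sorted(values)
        q1 = ordered[int(0.25 * (n - 1))]
        q3 = ordered[int(0.75 * (n - 1))]
        fd = 2.0 * (q3 - q1) / (n ** (1.0 / 3.0))
        sturges = (ordered[-1] - ordered[0]) / (math.ceil(math.log2(n)) + 1)
        if method in ("freedman_diaconis", "fd"):
            return fd
        if method == "sturges":
            return sturges
        return max(fd, sturges)  # illustrative combination for 'auto'

    print(round(optimal_width(list(range(1000))), 2))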
String methods
--------------
str_contains(pat) : verifies whether the regular expression 'pat' is in each
    record of the vColumn (REGEXP_COUNT(column, pat) > 0).
str_count(pat)    : computes the number of matches of the regular expression
    in each record of the vColumn.
str_extract(pat)  : extracts the regular expression from each record
    (REGEXP_SUBSTR).
str_replace(to_replace, value) : replaces each match of the regular expression
    'to_replace' with 'value' (REGEXP_REPLACE).
str_slice(start, step) : slices each record of the vColumn (SUBSTR), where
    'start' is the start of the slicing and 'step' its size.

Date methods
------------
date_part(field) : extracts a specific TS field from the vColumn (DATE_PART).
    'field' must be one of CENTURY / DAY / DECADE / DOQ / DOW / DOY / EPOCH /
    HOUR / ISODOW / ISOWEEK / ISOYEAR / MICROSECONDS / MILLENNIUM /
    MILLISECONDS / MINUTE / MONTH / QUARTER / SECOND / TIME ZONE /
    TIMEZONE_HOUR / TIMEZONE_MINUTE / WEEK / YEAR.
slice(length, unit, start) : slices and transforms the vColumn using a time
    series rule; 'length' is the slice size, 'unit' its unit ('second' by
    default) and, when 'start' is True, the record is sliced using the floor
    of the slicing instead of the ceiling.
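All of the str_* methods boil down to small SQL templates. A hedged sketch of
that mapping (the helper name and the template table are illustrative and only
cover the functions quoted above):

    def string_method_sql(column, method, *args):
        templates = {
            "str_contains": "REGEXP_COUNT({0}, '{1}') > 0",
            "str_count": "REGEXP_COUNT({0}, '{1}')",
            "str_extract": "REGEXP_SUBSTR({0}, '{1}')",
            "str_replace": "REGEXP_REPLACE({0}, '{1}', '{2}')",
            "str_slice": "SUBSTR({0}, {1}, {2})",
        }
        # Escape single quotes in string arguments before formatting.
        safe = [a.replace("'", "''") if isinstance(a, str) else a for a in args]
        return templates[method].format(column, *safe)

    print(string_method_sql('"name"', "str_contains", "Mr\\."))
    print(string_method_sql('"name"', "str_slice", 1, 3))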
Mathematical methods
--------------------
abs, add(x), sub(x), mul(x), div(x), round(n) : element-wise arithmetic on the
    vColumn; div raises an error when x is 0, round keeps n digits after the
    comma, and for date-like vColumns add / sub shift the values by x seconds
    (TIMESTAMPADD).
apply(func, copy_name) : applies a function to the vColumn. 'func' is a
    function in pure SQL used to transform the vColumn; the function variable
    must be composed of two flower brackets {}. For example, to apply the
    function x -> x^2 + 2, use "POWER({}, 2) + 2". When 'copy_name' is not
    empty, the result is stored in a copy of the vColumn instead.
apply_fun(func, x) : applies a default function to the vColumn; 'func' is one
    of abs, acos, asin, atan, cbrt, ceil, cos, cosh, cot, exp, floor, ln, log,
    mod, pow, sin, sinh, sqrt, tan, tanh. For two-argument functions (for
    example pow or mod), 'x' represents the second argument.

Type methods
------------
astype(dtype) : converts the vColumn to the input type; the cast is tested
    against the relation before being registered as a transformation.
ctype() / dtype() : vColumn database type.
category()    : vColumn category (bool / date / int / float / text / binary /
    spatial / undefined).
isbool(), isdate(), isnum() : True when the vColumn is boolean, of type date,
    or numerical respectively.
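The named functions of apply_fun are, in effect, a lookup from name to SQL
template. A minimal sketch covering a handful of them (the mapping and the
sample column are illustrative, not the library's table):

    def apply_fun_expression(column, func, x=2):
        one_arg = {"abs": "ABS({})", "ln": "LN({})", "sqrt": "SQRT({})",
                   "floor": "FLOOR({})", "exp": "EXP({})"}
        two_arg = {"pow": "POWER({}, {})", "mod": "MOD({}, {})",
                   "round": "ROUND({}, {})"}
        if func in one_arg:
            return one_arg[func].format(column)
        return two_arg[func].format(column, x)

    print(apply_fun_expression('"fare"', "pow", 2))  # POWER("fare", 2)
    print(apply_fun_expression('"fare"', "sqrt"))    # SQRT("fare")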
\"\"\" check_types([(\"x\", x, [int, float], (\"approx\", approx,", "\"cat_stats\": values = { \"index\": [\"name\", \"dtype\"] + index, \"value\":", "round(h, 6) if self.category() == \"int\": h = int(max(math.floor(h), 1))", "\"max_depth\": 10} to train a Random Forest with 20 trees", "self.catalog[\"percent\"] = ( 100 * (int(sauv[\"count\"]) + total) / self.parent.shape()[0]", "ParameterError( \"Length of parameter breaks must be equal to the", "\"percent\": 100.0, \"unique\": 2, \"approx_unique\": 2, \"prod\": 0, }, )", "optional Method to use to impute the missing values. auto", "/ self.parent.shape()[0] ) except: pass self.parent.__add_to_history__( \"[Discretize]: The vColumn {}", "the parameter 'name', you'll be able to solve this issue.\"", "occurent elements to return. Returns ------- tablesample An object containing", "h = round(h, 6) if self.category() == \"int\": h =", "0.25) AS 'approx_25%', APPROXIMATE_PERCENTILE ({3}{4} USING PARAMETERS percentile = 0.5)", "the filling.\".format(e)) if total > 0: try: if \"count\" in", "result[7], ) elif self.isdate(): min_date = self.min() table = \"(SELECT", "1): first_elem, second_elem = breaks[idx], breaks[idx + 1] if right:", "be updated when the parent vDataFrame is modified. Attributes ----------", "# \\ \\__| \\ \\_____\\ \\ \\_____\\ \\ \\_____\\ \\", "\"\"\" --------------------------------------------------------------------------- Adds a copy vColumn to the parent vDataFrame.", "+ [(expr, \"bool\", \"int\")] new_vColumn = vColumn( name, parent=self.parent, transformations=transformations,", "\"\"\" return self.category() == \"date\" # ---# def isin(self, val:", "digits to keep after the comma. Returns ------- vDataFrame self.parent", "self.parent.shape()[0] if isinstance(index_stop, int): if index_stop < 0: index_stop +=", "of discretization using the method 'smart'.\" ) assert response, ParameterError(", "\" DESC\" partition_by = ( \"PARTITION BY {}\".format( \", \".join([quote_ident(column)", "---# def spider( self, by: str = \"\", method: str", "if method != \"robust_zscore\": max_floor = 0 for elem in", "expression is in each of the vColumn records. vDataFrame[].extract :", "[ (\"ts\", ts, [str]), (\"by\", by, [str]), (\"start_date\", start_date, [str,", "\"\", \"\" for category in cat: tmp_query = \"\"\"SELECT '{0}'", "category : 'Others'. cat_priority: list, optional List of the different", "quantile(alpha) and as upper bound quantile(1-alpha) if 'use_threshold' is set", "with 0 and 1 (1 meaning global outlier). \"\"\" check_types(", "-------- vDataFrame[].fill_outliers : Fills the vColumn outliers using the input", "____________ ______ # / __ `\\ / / # |", "min, APPROXIMATE_PERCENTILE({} USING PARAMETERS percentile = 0.25) AS Q1, APPROXIMATE_PERCENTILE({}", "self.parent.are_namecols_in(response) response = self.parent.format_colnames(response) assert self.parent[response].isnum(), TypeError( \"The response column", "{} ELSE NULL END)\".format( \"{}\", p_alpha, p_1_alpha, \"{}\" ) )", "bar return bar(self, method, of, max_cardinality, nbins, h, ax=ax, **style_kwds)", ") query = \"SELECT {} AS index, non_events, events, pt_non_events,", "WHERE {} IS NOT NULL GROUP BY 1) x ORDER", "gain in performance. The catalog will be updated when the", "str, Function in pure SQL used to transform the vColumn.", "the number of matches for the regular expression in each", "+= \"\\t{} => {}\".format(distinct_elements[k], k) expr = \", \".join(expr) +", "the head of the vColumn. 
\"\"\" return self.iloc(limit=limit, offset=-1) #", "\"_\") .replace(\"/\", \"_\") .replace(\",\", \"_\") .replace(\"'\", \"_\") ) expr =", "h > 0.000001: h = round(h, 6) if self.category() ==", "/ Weight Of Evidence (WOE) Table. It tells the predictive", "def str_replace(self, to_replace: str, value: str = \"\"): \"\"\" ---------------------------------------------------------------------------", ") count = self.count() nb = int(float(count / int(nbins))) assert", "result] elif self.isnum() and method in (\"same_width\", \"auto\"): if not", "the vColumn is numerical, False otherwise. Returns ------- bool True", "xlim=(xmin, xmax), ax=ax, **updated_dict(param, style_kwds, idx), ) custom_lines += [", ": remainder of a division operation pow : number raised", "parameter 'labels' must be empty.\" ) conditions, column = [],", "2): \"\"\" --------------------------------------------------------------------------- Applies a default function to the vColumn.", ": Number of elements. density : Percentage of the distribution.", "elem[2] if elem[2] != None else \"NULL\", ) for elem", "[name] all_new_features += [name] conj = \"s were \" if", "ParameterError( \"At least 'lower' or 'upper' must have a numerical", "\"\" if (k < 1) else \"LIMIT {}\".format(k) dropna =", "vColumn is boolean, False otherwise. Returns ------- bool True if", "self.parent.format_colnames(by) nullifzero, n = 1, len(by) if self.isbool(): warning_message =", "self.aggregate([\"aad\"]).values[self.alias][0] # ---# def abs(self): \"\"\" --------------------------------------------------------------------------- Applies the absolute", "the input functions. Parameters ---------- func: list List of the", "( 100 * (int(sauv[\"count\"]) + total) / self.parent.shape()[0] ) except:", "nth most occurent element. See Also -------- vDataFrame.aggregate : Computes", "sum(self): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'sum'. Returns -------", ": percent of non-missing elements q% : q quantile (ex:", "based on an aggregation. \"\"\" check_types( [ (\"by\", by, [str]),", "(PARTITION BY {}))\".format( \"{}\", fun, \"{}\", \", \".join(by) ) elif", "tells the predictive power of an independent variable in relation", "when drawing the box plot. The other categories will be", "the equal frequency histogram bins.\", method=\"fetchall\", ) result = [elem[0]", "float): category, ctype = \"float\", \"float\" elif method == \"0ifnull\":", "pat, [str])]) return self.apply( func=\"REGEXP_SUBSTR({}, '{}')\".format(\"{}\", pat.replace(\"'\", \"''\")) ) #", "response = self.parent.format_colnames(response) assert self.parent[response].isnum(), TypeError( \"The response column must", "limit) title = \"Reads {}.\".format(self.alias) tail = to_tablesample( \"SELECT {}", "q, [tuple]), ( \"start_date\", start_date, [str, datetime.datetime, datetime.date, int, float],", "self.parent See Also -------- vDataFrame[].drop_outliers : Drops outliers in the", "\"\"\" --------------------------------------------------------------------------- Discretizes the vColumn using the input method. 
Parameters", "\"'{}'\".format(str(elem[0]).replace(\"'\", \"''\")) if elem[0] != None else \"NULL\", elem[1] if", "= \"\", area: bool = False, step: bool = False,", "str = \"\", max_cardinality: Union[int, tuple] = (6, 6), h:", "in kwargs): kwargs[\"figsize\"] = (14, 10) return self.parent[columns].to_geopandas(self.alias).plot(*args, **kwargs) #", "\"WITH vdf_table AS (SELECT * FROM {}) {}\".format( self.parent.__genSQL__(), query", "self.apply(func=\"DATE_PART('{}', {})\".format(field, \"{}\")) # ---# def decode(self, *argv): \"\"\" ---------------------------------------------------------------------------", "\"{} = '{}'\".format(self.parent[by].alias, column) )[self.alias].density( bandwidth=bandwidth, kernel=kernel, nbins=nbins, xlim=(xmin, xmax),", "information, see utilities.tablesample. See Also -------- vDataFrame.analytic : Adds a", "self.parent.are_namecols_in(ts) ts = self.parent.format_colnames(ts) from verticapy.plot import range_curve_vdf return range_curve_vdf(", "the Information Value (IV) / Weight Of Evidence (WOE) Table.", "+ sys.getsizeof(self.catalog) ) for elem in self.catalog: total += sys.getsizeof(elem)", "int Size of the slicing. Returns ------- vDataFrame self.parent See", "be considered during the computation. n: int, optional Integer corresponding", "== None: mean_alpha = \"NULL\" self.apply( func=\"(CASE WHEN {} <", "int): cast = \"::float\" if self.category() == \"float\" else \"\"", "[int, float]), (\"h\", h, [int, float]), (\"cat_priority\", cat_priority, [list]), ]", "parameters used to compute the best splits when 'method' is", "] ) method = method.lower() self.parent.are_namecols_in([elem for elem in order_by]", "= \"\", max_cardinality: int = 6, nbins: int = 0,", "def var(self): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'var' (Variance).", "return boxplot(self, by, h, max_cardinality, cat_priority, ax=ax, **style_kwds) # ---#", "normalize {} using a Robust Z-Score - The MAD is", "describe method. 
auto : Sets the method to 'numerical' if", "[column] if not (\"cmap\" in kwargs): from verticapy.plot import gen_cmap", "executeSQL( \"SELECT {}, {} FROM {} LIMIT 1\".format( avg, stddev,", "\"\"\" --------------------------------------------------------------------------- Returns True if the vColumn is boolean, False", "# ---# def str_extract(self, pat: str): \"\"\" --------------------------------------------------------------------------- Extracts the", "in result], \"percent\": [float(round(item[2], 3)) for item in result], }", "\"\"\" check_types([(\"to_replace\", to_replace, [str]), (\"value\", value, [str])]) return self.apply( func=\"REGEXP_REPLACE({},", "vColumn {} was transformed using a mean encoding with {}", "\\ \\'/ \\ \\ \\____ \\ \\ \\/\\ \\ \\", "max : maximum mean : average median : median min", "import verticapy.stats as st return self.apply(func=st.decode(str_sql(\"{}\"), *argv)) # ---# def", "USING PARAMETERS percentile = 0.9) AS 'approx_90%', MAX({3}{4}) AS max", "categories to normalize.\", method=\"fetchall\", ) for i in range(len(result)): if", "---# def nunique(self, approx: bool = True): \"\"\" --------------------------------------------------------------------------- Aggregates", "max_cardinality: int, optional Maximum number of vColumn distinct elements to", "discretization using the method 'same_freq'\" ) count = self.count() nb", "already the alias {name}.\\nBy changing the parameter 'name', you'll be", "\"VERTICAPY_NOT_PRECOMPUTED\": if not (dropna) and (pre_comp != None): return pre_comp", "---# def iv_woe(self, y: str, nbins: int = 10): \"\"\"", ">= 2, ParameterError( \"Length of parameter 'breaks' must be greater", "Uses the Random Forest on a response column to find", "------- vDataFrame self.parent See Also -------- vDataFrame[].decode : Encodes the", "/ 3)] sturges : Sturges [CEIL(log2(n)) + 1] Returns -------", "winsorize : Clips the vColumn using as lower bound quantile(alpha)", "return top # ---# def mul(self, x: float): \"\"\" ---------------------------------------------------------------------------", "values = to_tablesample(query, title=title).values elif ( ((distinct_count < max_cardinality +", ") for elem in self.catalog: total += sys.getsizeof(elem) return total", "using 'avg' (Average). Returns ------- float average See Also --------", "self.parent.__genSQL__(), dropna, self.alias, topk, ) result = executeSQL( query, title=\"Computing", "0 if index_start < 0: index_start += self.parent.shape()[0] if isinstance(index_stop,", "into one unique category. h: float, optional The interval size", "Label Encoding. vDataFrame[].mean_encode : Encodes the vColumn using the mean", "the input expression. \"\"\" self.parent.filter(\"{} IS NOT NULL\".format(self.alias)) return self.parent", "index = [\"unique\", \"count\"] + [item[0] for item in query_result]", "for testing to be able to look at the final", "histogram of the vColumn based on an aggregation. Parameters ----------", "a Step Plot. ax: Matplotlib axes object, optional The axes", "distinct_elements = self.distinct() expr = [\"DECODE({}\"] text_info = \"\\n\" for", "to the input vColumn. \"\"\" check_types([(\"x\", x, [int, float])]) if", "use to discretize the vColumn. 
auto : Uses method 'same_width'", "(\"method\", method, [\"winsorize\", \"null\", \"mean\"]), (\"alpha\", alpha, [int, float]), (\"use_threshold\",", "med, mad), \"float\", \"float\", ) ] else: warning_message = \"Can", "for item in self.transformations], catalog=self.catalog, ) setattr(self.parent, name, new_vColumn) setattr(self.parent,", "0, ax=None, **style_kwds, ): \"\"\" --------------------------------------------------------------------------- Draws the bar chart", "use to normalize. zscore : Normalization using the Z-Score (avg", "second argument. Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].apply", ") executeSQL( \"SELECT {} FROM {} LIMIT 1\".format( new_column.format(self.alias), self.parent.__genSQL__()", "\\ / / / # \\/ / / # /", "bound to the lower bound itself and the values higher", "\"\"\" if isinstance(pie_type, str): pie_type = pie_type.lower() check_types( [ (\"method\",", "on an aggregation. \"\"\" check_types( [ (\"by\", by, [str]), (\"kernel\",", "\".join(all_new_features) ) + \".\" ) return self.parent one_hot_encode = get_dummies", "query = \"WITH vdf_table AS (SELECT * FROM {}) (SELECT", "vColumn_min, vColumn_025, vColumn_075, vColumn_max = ( result[0], result[3], result[4], result[6],", "by = self.parent.format_colnames(by) from verticapy.plot import ts_plot return ts_plot( self,", "kernel: str = \"gaussian\", nbins: int = 200, xlim: tuple", "integer k of the 'topk' method. new_category: str, optional The", "optional Uses the Gaussian distribution to define the outliers. After", "return self.apply(func=\"{} / ({})\".format(\"{}\", x)) # ---# def drop(self, add_history:", "== \"robust_zscore\": if n > 0: warning_message = \"The method", "case of discretization using the method 'same_freq'\" ) count =", "ts_plot( self, ts, by, start_date, end_date, area, step, ax=ax, **style_kwds,", "vDataFrame.plot : Draws the time series. \"\"\" check_types( [ (\"ts\",", "\", \".join([\"1\"] + nth_elems + [str(count)]) ) query = \"SELECT", "APPROXIMATE_PERCENTILE ({3}{4} USING PARAMETERS percentile = 0.75) AS 'approx_75%', APPROXIMATE_PERCENTILE", "outliers in the vColumn. Parameters ---------- threshold: float, optional Uses", "if n > 0: warning_message = \"The method 'robust_zscore' is", "used to compute other vColumns. Parameters ---------- add_history: bool, optional", ") if method == \"winsorize\": self.clip(lower=p_alpha, upper=p_1_alpha) elif method ==", "use to partition the TS. start_date: str / date, optional", "self.parent See Also -------- vDataFrame[].dropna : Drops the vColumn missing", "\"\"\" check_types( [ (\"RFmodel_params\", RFmodel_params, [dict]), (\"return_enum_trans\", return_enum_trans, [bool]), (\"h\",", "bool, optional If set to True, the information will be", "ffill|pad|bfill|backfill then 'order_by' must be a list of at least", "std # ---# def store_usage(self): \"\"\" --------------------------------------------------------------------------- Returns the vColumn", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "vColumn using multiple statistical aggregations: min, max, median, unique... 
depending", ") else: trans = (\"FLOOR({}) || ''\", \"varchar\", \"text\") else:", "--------------------------------------------------------------------------- Replaces the regular expression matches in each of the", "[int, float]), ( \"method\", method, [\"auto\", \"smart\", \"same_width\", \"same_freq\", \"topk\"],", "the Equal Frequency discretization\" ) total, query, nth_elems = nb,", "final_transformation = [ ( \"({} - {}) / {}({})\".format( \"{}\",", "the vColumn with One-Hot Encoding. vDataFrame[].mean_encode : Encodes the vColumn", "'skewness'. Returns ------- float skewness See Also -------- vDataFrame.aggregate :", "vDataFrame[].decode : Encodes the vColumn with a user defined Encoding.", ") ) else: p_alpha, p_1_alpha = ( self.parent.quantile([alpha, 1 -", "approx=approx) # ---# def memory_usage(self): \"\"\" --------------------------------------------------------------------------- Returns the vColumn", "cmin, ), \"float\", \"float\", ) ] if method != \"robust_zscore\":", "BY count DESC\" ).format(self.alias, max_cardinality + 1) query = \"WITH", "elif self.isnum() and method == \"same_freq\": assert nbins >= 2,", "[ (\"AVG({}) OVER (PARTITION BY {})\".format(response, \"{}\"), \"int\", \"float\") ]", ") title = \"Computing WOE & IV of {} (response", "outliers using the input method. \"\"\" check_types([(\"lower\", lower, [float, int]),", "------- tablesample An object containing the result. For more information,", "# ---# def numh(self, method: str = \"auto\"): \"\"\" ---------------------------------------------------------------------------", "{}, \"regr_avgx\": {}, \"regr_avgy\": {}, \"regr_count\": {}, \"regr_intercept\": {}, \"regr_r2\":", "(is_date) and not (method == \"categorical\"): result = self.aggregate([\"count\", \"min\",", "Z-Score (avg and std). (x - avg) / std robust_zscore", "None elif method == \"robust_zscore\": self.catalog[elem] = (sauv[elem] - sauv[\"approx_50%\"])", "by = self.parent.format_colnames(by) from verticapy.plot import gen_colors from matplotlib.lines import", "False otherwise. Returns ------- bool True if the vColumn category", "End Date. For example, time = '03-11-1993' will filter the", "float, optional Number representing the outliers threshold. Values lesser than", "FROM vdf_table WHERE {0} IS NOT NULL GROUP BY {0}", "\"\"\" --------------------------------------------------------------------------- Divides the vColumn by the input element. Parameters", "to plot on. **style_kwds Any optional parameter to pass to", "the vColumn using 'kurtosis'. Returns ------- float kurtosis See Also", "using 'median'. Parameters ---------- approx: bool, optional If set to", "tuple] = (6, 6), h: Union[int, float, tuple] = (None,", "the optimal h. auto : Combination of Freedman Diaconis and", "FROM vdf_table WHERE {} > {})\".format( self.parent.__genSQL__(), self.alias, self.alias, p_alpha,", "not be empty\" ) assert not (self.parent.is_colname_in(name)), NameError( f\"A vColumn", "NOT NULL\".format(self.alias)) return self.parent # ---# def fill_outliers( self, method:", "and other statistical information. Parameters ---------- k: int, optional Number", "more information, see utilities.tablesample. 
See Also -------- vDataFrame[].head : Returns", ")[self.alias].density( bandwidth=bandwidth, kernel=kernel, nbins=nbins, xlim=(xmin, xmax), ax=ax, **updated_dict(param, style_kwds, idx),", "== \"minmax\": if n == 0: nullifzero = 0 cmin,", "= \"SELECT {}::{} AS {} FROM {} WHERE {} IS", "\"count\": [int(item[1]) for item in result], \"percent\": [float(round(item[2], 3)) for", "* ## # # __ __ ______ ______ __ __", "GROUP BY {}\".format( by[0], self.alias, self.alias, self.parent.__genSQL__(), by[0], ), title=\"Computing", "---# def __repr__(self): return self.head(limit=verticapy.options[\"max_rows\"]).__repr__() # ---# def _repr_html_(self): return", "vColumn. nbins: int, optional Maximum number of nbins used for", "Offset. Returns ------- tablesample An object containing the result. For", "the input method. Parameters ---------- method: str, optional Method to", "Returns True if the vColumn is numerical, False otherwise. Returns", "be a list of at least one element to use", ") except: pass total = int(total) conj = \"s were", "self.parent # ---# def aggregate(self, func: list): \"\"\" --------------------------------------------------------------------------- Aggregates", "p_alpha, p_1_alpha, \"{}\" ) ) elif method == \"mean\": query", "--------------------------------------------------------------------------- Filters the vDataFrame where the vColumn is missing. Returns", "\"method\", method, [\"auto\", \"smart\", \"same_width\", \"same_freq\", \"topk\"], ), (\"return_enum_trans\", return_enum_trans,", "result = [elem[0] for elem in result] except: drop(tmp_view_name, method=\"view\")", "during the computation. n: int, optional Integer corresponding to the", "result[4], result[6], result[7], ) elif self.isdate(): min_date = self.min() table", "topk, ) result = executeSQL( query, title=\"Computing the top{} categories", "val = val.replace(\"'\", \"''\") if val != None: new_column =", "element from the vColumn. Parameters ---------- x: float If the", "vDataFrame.aggregate : Computes the vDataFrame input aggregations. \"\"\" return self.aggregate([\"mad\"]).values[self.alias][0]", "[cat_priority] check_types( [ (\"by\", by, [str]), (\"max_cardinality\", max_cardinality, [int, float]),", "+= [str(total)] total += nb where = \"WHERE _verticapy_row_nb_ IN", "The integer k of the 'topk' method. new_category: str, optional", "non_events = 0 OR events = 0 THEN 0 ELSE", "method.lower() assert (method != \"cat_stats\") or (numcol), ParameterError( \"The parameter", "title=\"Computing the descriptive statistics of {}.\".format(self.alias), method=\"fetchall\", ) result =", "(PARTITION BY {}))\".format( \"{}\", fun, \"{}\", \", \".join(by) ) else:", "by[0], self.alias, self.alias, self.parent.__genSQL__(), by[0], ), title=\"Computing the different categories", "def skewness(self): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'skewness'. 
Returns", "\" WHERE {} = '{}'\".format( bin_spatial_to_str(self.category(), self.alias), category, ) )", "\"\"\" check_types([(\"response\", response, [str])]) self.parent.are_namecols_in(response) response = self.parent.format_colnames(response) assert self.parent[response].isnum(),", "not (dropna) and (pre_comp != None): return pre_comp assert n", "when applying the func 'x -> {}' to '{}'\".format( e,", "= [], order_by: list = [], ): \"\"\" --------------------------------------------------------------------------- Fills", "{})\".format(x, \"{}\")) else: return self.apply(func=\"{} + ({})\".format(\"{}\", x)) # ---#", "Random Forest model parameters used to compute the best splits", "# ---# def topk(self, k: int = -1, dropna: bool", "self.alias, self.parent.__genSQL__() ) query = \"SELECT COUNT({}) AS NAs, MIN({})", "Back Propagation of the next element (Constant Interpolation). ffill :", "\" END\" self.apply(func=expr) # ---# def ctype(self): \"\"\" --------------------------------------------------------------------------- Returns", "+ [item[1] for item in query_result] index = [\"unique\", \"count\"]", "more information, see utilities.tablesample. See Also -------- vDataFrame.iv_woe : Computes", ") warnings.warn(warning_message, Warning) return self elif (n == 1) and", "times, so it's better practice to use this method when", "== \"minmax\": self.catalog[elem] = (sauv[elem] - sauv[\"min\"]) / ( sauv[\"max\"]", "/ / # |____/ / / # _____________ / /", "comma. Parameters ---------- n: int Number of digits to keep", "from collections.abc import Iterable from typing import Union # VerticaPy", "in the vColumn and it returns the new vDataFrame of", "stddev = ( \"AVG({}) OVER (PARTITION BY {})\".format( self.alias, \",", "datetime.date] = \"\", end_date: Union[str, datetime.datetime, datetime.date] = \"\", area:", "float]), (\"cat_priority\", cat_priority, [list]), ] ) if by: self.parent.are_namecols_in(by) by", "the dummies. drop_first: bool, optional Drops the first dummy to", "{} ORDER BY _verticapy_cnt_ DESC {}\".format( bin_spatial_to_str(self.category(), self.alias), self.alias, self.parent.shape()[0],", "be transformed. Parameters ---------- pat: str Regular expression. Returns -------", "= get_category_from_vertica_type(ctype=ctype) all_cols, max_floor = self.parent.get_columns(), 0 for column in", "events = 0 THEN 0 ELSE ZEROIFNULL(LN(pt_non_events / NULLIFZERO(pt_events))) END", "float var See Also -------- vDataFrame.aggregate : Computes the vDataFrame", "of the different transformations. \"\"\" # # Special Methods #", "the second argument. Returns ------- vDataFrame self.parent See Also --------", "return tail # ---# def isbool(self): \"\"\" --------------------------------------------------------------------------- Returns True", "int Slice size. unit: str, optional Slice size unit. For", "an input value. \"\"\" check_types([(\"start\", start, [int, float]), (\"step\", step,", "def round(self, n: int): \"\"\" --------------------------------------------------------------------------- Rounds the vColumn by", "= \"zscore\", by: list = [], return_trans: bool = False", "h: float, optional Interval width if the vColumn is numerical", "on the right. Returns ------- vDataFrame self.parent See Also --------", "\"]\" else: op1, op2, close_l, close_r = \"<=\", \"<\", \"[\",", "str = \"\", by: list = [], order_by: list =", "function to the vColumn. 
\"\"\" check_types( [ ( \"func\", func,", "nbins, h, ax=ax, **style_kwds) # ---# def boxplot( self, by:", "\"\", max_cardinality: int = 6, h: float = 0, pie_type:", "top # ---# def mul(self, x: float): \"\"\" --------------------------------------------------------------------------- Multiplies", ") ) # ---# def spider( self, by: str =", "< {} THEN {} \".format(\"{}\", lower, lower) if (isinstance(lower, (float,", "outliers by the NULL value. winsorize : Clips the vColumn", "elem in copy_trans] for elem in sauv: self.catalog[elem] = sauv[elem]", "str, transformations: list = [], parent=None, catalog: dict = {}", "title=\"Computing the equal frequency histogram bins.\", method=\"fetchall\", ) result =", "{} BETWEEN {} AND {} THEN '[{};{}]' \".format( \"{}\", result[i", "method = method.lower() self.parent.are_namecols_in([elem for elem in order_by] + by)", "--------------------------------------------------------------------------- Aggregates the vColumn using 'min' (Minimum). Returns ------- float/str", "# ---# def nunique(self, approx: bool = True): \"\"\" ---------------------------------------------------------------------------", "!= None ] ), ) stddev = \"DECODE({}, {}, NULL)\".format(", "from verticapy.plot import range_curve_vdf return range_curve_vdf( self, ts, q, start_date,", "] self.parent.__update_catalog__(erase=True, columns=[self.alias]) self.parent.__add_to_history__( \"[Mean Encode]: The vColumn {} was", "\"int\": best_h = max(math.floor(best_h), 1) return best_h # ---# def", "not in kwargs: query = \"SELECT {} AS {} FROM", "DB version you are using. Returns ------- tablesample An object", "( \"(CASE WHEN {} IN ({}) THEN {} || ''", "tuple, optional Tuple including the 2 quantiles used to draw", "elem in order_by] for elem in all_partition: if len(self.parent[elem].transformations) >", "------- int number of non-Missing elements. See Also -------- vDataFrame.aggregate", "Computes the vDataFrame input aggregations. \"\"\" return self.aggregate([\"sem\"]).values[self.alias][0] # ---#", "vDataFrame[].ctype : Returns the vColumn database type. \"\"\" return self.transformations[-1][2]", "*= 1.4826 if mad != 0: if return_trans: return \"({}", "stddev ), \"float\", \"float\", ) ] elif method == \"robust_zscore\":", "= (sauv[elem] - sauv[\"approx_50%\"]) / ( 1.4826 * sauv[\"mad\"] )", "k > 0 else \"\", self.alias ), method=\"fetchall\", ) values", "a new vColumn to the vDataFrame by using an advanced", "[bool]), ] ) method = method.lower() self.parent.are_namecols_in(by) by = self.parent.format_colnames(by)", "= '03-11-1993' will filter the data when 'ts' is greater", "by an input value. The vColumn will be transformed. Parameters", "vColumn category. In this case, the parameter 'numcol' must be", "self.catalog: sauv[elem] = self.catalog[elem] self.parent.__update_catalog__(erase=True, columns=[self.alias]) total = abs(self.count() -", "considered as categorical. numcol: str, optional Numerical vColumn to use", "---------- response: str Response vColumn. Returns ------- vDataFrame self.parent See", "int, optional Maximum number of vColumn distinct elements to be", "COUNT(*) / {} AS percent FROM {}{} GROUP BY {}", "when 'ts' is greater than November 1993 the 3rd. plot_median:", "self.alias, topk, ) result = executeSQL( query, title=\"Computing the top{}", "be included. right: bool, optional How the intervals should be", "missing values. 
auto : Mean for the numerical and Mode", "elif isinstance(index, int): cast = \"::float\" if self.category() == \"float\"", "[CEIL(log2(n)) + 1] Returns ------- float optimal bar width. \"\"\"", "{op2} '{second_elem}' THEN '{label}'\" ] expr = \"CASE WHEN \"", "merge the other into one unique category. h: float, optional", "be filled. Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].drop_outliers", "\"It can be the result of using previously the method", "if n == 1: pre_comp = self.parent.__get_catalog_value__(self.alias, \"top\") if pre_comp", "in the partition. return_trans: bool, optimal If set to True,", "average absolute deviation approx_unique : approximative cardinality count : number", "[bool]), ] ) start_or_end = \"START\" if (start) else \"END\"", "OFFSET {}\".format( bin_spatial_to_str(self.category(), self.alias), self.alias, self.parent.__genSQL__(), self.parent.__get_last_order_by__(), limit, offset, ),", ") # ---# def str_count(self, pat: str): \"\"\" --------------------------------------------------------------------------- Computes", "percent : percent of non-missing elements q% : q quantile", "see utilities.tablesample. See Also -------- vDataFrame[].nlargest : Returns the n", "{}'s lower and upper outliers.\".format( self.alias ), method=\"fetchall\", ) ]", "percentile = 0.1) AS 'approx_10%', APPROXIMATE_PERCENTILE ({3}{4} USING PARAMETERS percentile", "computed. pie_type: str, optional The type of pie chart. auto", "{})\".format(\"{}\", start, step)) # ---# def sub(self, x: float): \"\"\"", "[distinct_count, self.count()] + [item[1] for item in query_result] index =", "Returns the vColumn most occurent elements. \"\"\" if \"agg\" not", "If set to True, the approximate cardinality is returned. By", "limit: int = 5): \"\"\" --------------------------------------------------------------------------- Returns the head of", ": Back Propagation of the next element (Constant Interpolation). ffill", "---------- func: str, Function in pure SQL used to transform", "{}'.\".format( copy_name.replace('\"', \"\"), func.replace(\"{}\", \"x\"), ) ) else: for k", "verticapy.errors import * ## # # __ __ ______ ______", "Computes the vDataFrame input aggregations. \"\"\" return self.aggregate([\"mad\"]).values[self.alias][0] # ---#", "expression in each record of the vColumn. vDataFrame[].str_slice : Slices", "If set to True, NULL values will not be considered", ") assert len(breaks) >= 2, ParameterError( \"Length of parameter 'breaks'", "---------- val: int/float/str, optional Value to use to impute the", "self.category() in [\"date\", \"float\"]: warning_message = ( \"label_encode is only", "6) else \"mode\" total = self.count() if (method == \"mode\")", "quantile (or approximate quantile). See Also -------- vDataFrame.aggregate : Computes", "{} WHERE {} IS NOT NULL GROUP BY {} ORDER", "val += list(args) check_types([(\"val\", val, [list])]) val = {self.alias: val}", ": Sturges [CEIL(log2(n)) + 1] Returns ------- float optimal bar", "--------------------------------------------------------------------------- Aggregates the vColumn using the input functions. Parameters ----------", "normalize.\", method=\"fetchall\", ) for i in range(len(result)): if result[i][2] ==", "IS NOT NULL \".format(self.alias) if (dropna) else \" \" result", "if (nullifzero) else \"\", stddev ) else: final_transformation = [", "self.alias, self.parent.__genSQL__(), self.parent.__get_last_order_by__(), limit, offset, ), title=title, ) tail.count =", "of the vColumn 'of'. 
max : Maximum of the vColumn", "item in sublist] # ---# def div(self, x: float): \"\"\"", "get_dummies # ---# def head(self, limit: int = 5): \"\"\"", "elem[0] != None else \"NULL\", elem[1] if elem[1] != None", "new vDataFrame of the search. Parameters ---------- val: list List", ") query = \"SELECT {}, MIN(ord) AS ord, SUM(1 -", "bool True if the vColumn is numerical. See Also --------", "non_events, events, pt_non_events, pt_events, CASE WHEN non_events = 0 OR", "---# def drop_outliers( self, threshold: float = 4.0, use_threshold: bool", "0.75) AS Q3, MAX({}) AS max FROM {}\".format( self.alias, self.alias,", "# ---# def __repr__(self): return self.head(limit=verticapy.options[\"max_rows\"]).__repr__() # ---# def _repr_html_(self):", "categories to [0, n - 1] (n being the vColumn", "category = get_category_from_vertica_type(ctype=ctype) all_cols, max_floor = self.parent.get_columns(), 0 for column", "be computed. ax: Matplotlib axes object, optional The axes to", "vColumn elements. Parameters ---------- n: int, optional Offset. Returns -------", "vDataFrame[].str_contains : Verifies if the regular expression is in each", "include_lowest, [bool]), (\"right\", right, [bool]), ] ) assert self.isnum() or", "\"\"\" if isinstance(cat_priority, str) or not (isinstance(cat_priority, Iterable)): cat_priority =", "optimized h will be computed. pie_type: str, optional The type", "= True): \"\"\" --------------------------------------------------------------------------- Drops the vColumn from the vDataFrame.", "{}\".format(k) dropna = \" WHERE {} IS NOT NULL\".format(self.alias) if", "not (dropna): n = \"\" if (n == 1) else", "result. For more information, see utilities.tablesample. See Also -------- vDataFrame[].nlargest", "self.add_copy(new_name) parent = self.drop(add_history=False) parent.__add_to_history__( \"[Rename]: The vColumn {} was", "each of the vColumn records. The vColumn will be transformed.", "p_alpha, mean_alpha, \"{}\", p_1_alpha, mean_1_alpha, \"{}\" ) ) return self.parent", "3)) for item in result], } return tablesample(values) # ---#", "- sauv[\"min\"] ) except: pass if method == \"robust_zscore\": self.catalog[\"median\"]", "by: self.parent.are_namecols_in(by) by = self.parent.format_colnames(by) columns = [self.alias, by] else:", ": interquartile range kurtosis : kurtosis jb : Jarque-Bera index", "VERTICAPY_SUBTABLE WHERE split_value IS NOT NULL GROUP BY 1 ORDER", "ANY KIND, either express or implied. # See the License", "the discretization (must be > 1) Returns ------- tablesample An", "func, [ \"abs\", \"acos\", \"asin\", \"atan\", \"cbrt\", \"ceil\", \"cos\", \"cosh\",", "# See the License for the specific language governing permissions", "by == []: if fun == \"AVG\": val = self.avg()", "n: int = 1): \"\"\" --------------------------------------------------------------------------- Returns the nth most", "{} FROM {}) VERTICAPY_OPTIMAL_H_TABLE\".format( min_date, self.alias, self.alias, self.parent.__genSQL__() ) query", "memory usage. \"\"\" import sys total = ( sys.getsizeof(self) +", "elem in copy_trans] raise QueryError(\"{}\\nAn Error happened during the filling.\".format(e))", "else: model = RandomForestClassifier(tmp_model_name) model.set_params({\"n_estimators\": 20, \"max_depth\": 8, \"nbins\": 100})", "computed aggregations to gain in performance. The catalog will be", "------- float quantile (or approximate quantile). See Also -------- vDataFrame.aggregate", "different categories to consider when drawing the box plot. 
The", "(schema): schema = \"public\" tmp_view_name = gen_tmp_name(schema=schema, name=\"view\") tmp_model_name =", "The vColumn type must be date like (date, datetime, timestamp...)", "used in the partition. order_by: list, optional List of the", "optional SQL expression. by: list, optional vColumns used in the", "not (self.parent.is_colname_in(name)), NameError( f\"A vColumn has already the alias {name}.\\nBy", "'numcol' must be defined. numerical : Uses popular numerical aggregations", "cmax, cmin, ) else: final_transformation = [ ( \"({} -", "nbins: int, optional Number of bins. If empty, an optimized", "the number of seconds, otherwise it will represent a number.", "'of'. q% : q Quantile of the vColumn 'of' (ex:", "isinstance(index, slice): assert index.step in (1, None), ValueError( \"vColumn doesn't", "isinstance(val, str): val = val.replace(\"'\", \"''\") if val != None:", "/ / # / / # / / # \\", "points to use to evaluate the approximate density function. Increasing", "optional Method to use to fill the vColumn outliers. mean", "Uses numbers as suffix instead of the vColumns categories. Returns", ">= 1, ParameterError(\"Parameter 'n' must be greater or equal to", "\"\"\" check_types([(\"n\", n, [int, float])]) query = \"SELECT * FROM", "(\"step\", step, [bool]), ] ) self.parent.are_namecols_in(ts) ts = self.parent.format_colnames(ts) if", "\"donut\", \"rose\"]), ] ) donut = True if pie_type ==", "[str])]) try: query = \"SELECT {}::{} AS {} FROM {}", "bin_spatial_to_str(self.category(), self.alias), category, ) ) query += [lp + tmp_query", "time series. \"\"\" check_types( [ (\"ts\", ts, [str]), (\"by\", by,", "suffix instead of the vColumns categories. Returns ------- vDataFrame self.parent", "\"\" ) func = \"(CASE {}{}ELSE {} END)\".format(lower_when, upper_when, \"{}\")", "/ {}({})\".format( self.alias, avg, \"NULLIFZERO\" if (nullifzero) else \"\", stddev", "a number. Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].apply", "absolute value of the record is greater than the threshold,", "the vColumns 1 and 2 bars. It is only valid", ": Applies a function to the input vColumn. \"\"\" check_types([(\"x\",", "See Also -------- vDataFrame.astype : Converts the vColumns to the", "approx, [bool]))]) prefix = \"approx_\" if approx else \"\" return", "check_types([(\"x\", x, [int, float])]) if self.isdate(): return self.apply(func=\"TIMESTAMPADD(SECOND, {}, {})\".format(x,", "to define the outliers. After normalizing the data (Z-Score), if", "other types to varchar. same_freq : Computes bins with the", "a new vColumn labeled with 0 and 1 (1 meaning", "set to False else the lower and upper ZScores. threshold:", "elem in transformations], ) self.catalog = { \"cov\": {}, \"pearson\":", "during the filling.\".format(e)) if total > 0: try: if \"count\"", "\".join([\"1\"] + nth_elems + [str(count)]) ) query = \"SELECT {}", "The vColumn to use to compute the aggregation. h: int/float/tuple,", "---------- method: str, optional Method to use to fill the", "Draws the range plot of the vColumn. The aggregations used", "end_date: str / date, optional Input End Date. For example,", "better practice to use this method when first preparing your", "True): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'unique' (cardinality). Parameters", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "by an input value. vDataFrame[].str_slice : Slices the vColumn. \"\"\"", "(IV) / Weight Of Evidence (WOE) Table. 
It tells the", "elif (n == 1) and (self.parent[by[0]].nunique() < 50): try: result", "slice(self, length: int, unit: str = \"second\", start: bool =", "idea is simple: instead of moving # data around for", "Parameters ---------- alias: str vColumn alias. transformations: list, optional List", "optional The name of the merging category when using the", "\"[Drop]: vColumn {} was deleted from the vDataFrame.\".format(self.alias) ) return", "\"pearson\": {}, \"spearman\": {}, \"spearmand\": {}, \"kendall\": {}, \"cramer\": {},", "name the new categories. If empty, names will be generated.", "max(0, self.parent.shape()[0] - limit) title = \"Reads {}.\".format(self.alias) tail =", "than 1.\" ) index_stop = index.stop index_start = index.start if", "else \"\" query, cat = [], self.distinct() if len(cat) ==", "vDataFrame input aggregations. \"\"\" check_types([(\"approx\", approx, [bool])]) if approx: return", "[int, float])]) if self.isdate(): return self.apply(func=\"TIMESTAMPADD(SECOND, {}, {})\".format(x, \"{}\")) else:", "elements to display. Returns ------- tablesample An object containing the", "record of the vColumn. vDataFrame[].str_replace : Replaces the regular expression", "+ 1] Returns ------- float optimal bar width. \"\"\" check_types(", "model parameters used to compute the best splits when 'method'", "total) / self.parent.shape()[0] ) except: pass total = int(total) conj", "by their respective average. null : Replaces the outliers by", "else \"\", stddev ) else: final_transformation = [ ( \"({}", "[bool]), ] ) distinct_elements = self.distinct() if distinct_elements not in", "2)), 1e-99, ) fd = max(2.0 * (vColumn_075 - vColumn_025)", "MAD is null !\".format( self.alias ) warnings.warn(warning_message, Warning) return self", "to the geopandas plot function. For more information, see: https://geopandas.readthedocs.io/en/latest/docs/reference/api/", "---# def apply(self, func: str, copy_name: str = \"\"): \"\"\"", "the vColumn 'of'. min : Minimum of the vColumn 'of'.", "elements. same_width : Computes regular width bins. smart : Uses", "name = ( name.replace(\" \", \"_\") .replace(\"/\", \"_\") .replace(\",\", \"_\")", "optional Cardinality threshold to use to determine if the vColumn", "Median will be drawn. ax: Matplotlib axes object, optional The", "1.4826 * sauv[\"mad\"] ) elif method == \"zscore\": self.catalog[elem] =", "of bins will be computed. h: float, optional Interval width", "schema = verticapy.options[\"temp_schema\"] if not (schema): schema = \"public\" tmp_view_name", "**style_kwds, ): \"\"\" --------------------------------------------------------------------------- Draws the spider plot of the", "of the DB version you are using. 
Returns ------- tablesample", "pie_type = pie_type.lower() check_types( [ (\"method\", method, [str]), (\"of\", of,", "\"SELECT * FROM {} WHERE {} IS NOT NULL ORDER", "1) return best_h # ---# def nunique(self, approx: bool =", "x, [int, float]), ] ) if func not in (\"log\",", "---# def sum(self): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'sum'.", "method 'same_width' for numerical vColumns, cast the other types to", "), \"STDDEV({}) OVER (PARTITION BY {})\".format( self.alias, \", \".join(by) ),", "if drop_first else 0 for k in range(len(distinct_elements) - n):", "= 1 then this method will return the mode of", "\"APPROXIMATE_MEDIAN\" query = \"SELECT {}, {}({}) FROM {} GROUP BY", "---# def nsmallest(self, n: int = 10): \"\"\" --------------------------------------------------------------------------- Returns", "input aggregations. \"\"\" return self.aggregate([\"max\"]).values[self.alias][0] # ---# def mean_encode(self, response:", "to {}\".format( e, self.alias, dtype ) ) # ---# def", "TIMEZONE_HOUR / TIMEZONE_MINUTE / WEEK / YEAR Returns ------- vDataFrame", "method == \"smart\": schema = verticapy.options[\"temp_schema\"] if not (schema): schema", "0 ] # ---# def range_plot( self, ts: str, q:", "else str(elem[1]) new_column = \"COALESCE({}, DECODE({}, {}, NULL))\".format( \"{}\", by[0],", "The vColumn '{}' was transformed with the func 'x ->", "'prefix_sep'), you'll be able to solve this \" \"issue.\" )", "k in range(max_floor): self.transformations += [(\"{}\", self.ctype(), self.category())] self.transformations +=", "+= self.parent.shape()[0] query = \"SELECT {}{} FROM {}{} OFFSET {}", "# ---# def __setattr__(self, attr, val): self.__dict__[attr] = val #", "TIMEZONE_MINUTE / WEEK / YEAR Returns ------- vDataFrame self.parent See", "by] else: columns = [self.alias] if of: self.parent.are_namecols_in(of) of =", "): \"\"\" --------------------------------------------------------------------------- Draws the bar chart of the vColumn", "list = [], parent=None, catalog: dict = {} ): self.parent,", "# ---# def bar( self, method: str = \"density\", of:", "unique... depending on the input method. Parameters ---------- method: str,", "sauv[elem] = self.catalog[elem] self.parent.__update_catalog__(erase=True, columns=[self.alias]) total = abs(self.count() - total)", "---# def sub(self, x: float): \"\"\" --------------------------------------------------------------------------- Subtracts the input", "and method == \"smart\" ): n = len(result) trans =", ": mode (most occurent element). 0ifnull : 0 when the", ") result = executeSQL( query, title=\"Computing the top{} categories of", "Also -------- vDataFrame.filter: Filters the data using the input expression.", "BY COUNT(*) DESC LIMIT {1})\"\"\".format( self.alias, max_cardinality ) if distinct_count", "[\"\"] result.values[\"iv\"] += [sum(result[\"iv\"])] return result # ---# def kurtosis(self):", "/\\ \\ / / /\\ ___\\ /\\ __ \\ /\\", "see utilities.tablesample. See Also -------- vDataFrame[].nsmallest : Returns the n", "else \" was \" if verticapy.options[\"print_info\"]: print(\"{} element{}filled.\".format(total, conj)) self.parent.__add_to_history__(", "(SELECT COUNT(*) AS count\" \" FROM vdf_table WHERE {0} IS", "of the vColumn records by an input value. 
\"\"\" check_types([(\"start\",", "-------- vDataFrame[].bar : Draws the Bar Chart of vColumn based", "None): val = self.mode(dropna=True) if val == None: warning_message =", "import verticapy from verticapy.utilities import * from verticapy.toolbox import *", "optional Number of most occurent elements to return. Returns -------", "str = \"Others\", RFmodel_params: dict = {}, response: str =", "self.parent.format_colnames(response) drop(tmp_view_name, method=\"view\") self.parent.to_db(tmp_view_name) from verticapy.learn.ensemble import ( RandomForestClassifier, RandomForestRegressor,", "str, optional The method to use to aggregate the data.", "{})\".format( self.alias, cmin, \"NULLIFZERO\" if (nullifzero) else \"\", cmax, cmin,", "non-Missing elements. See Also -------- vDataFrame.aggregate : Computes the vDataFrame", "ParameterError( \"Parameter 'response' can not be empty in case of", "the other into one unique category. h: float, optional The", "{}, NULL)\".format( by[0], \", \".join( [ \"{}, {}\".format( \"'{}'\".format(str(elem[0]).replace(\"'\", \"''\"))", ".values[self.alias] ) self.parent.filter( \"({} BETWEEN {} AND {})\".format(self.alias, p_alpha, p_1_alpha)", "cmax, cmin, ), \"float\", \"float\", ) ] if method !=", "= executeSQL( \"SELECT ZEROIFNULL(SUM(LENGTH({}::varchar))) FROM {}\".format( bin_spatial_to_str(self.category(), self.alias), self.parent.__genSQL__(), ),", "True if the vColumn is boolean. vDataFrame[].isdate : Returns True", "IV of {} (response = {}).\".format(self.alias, y) result = to_tablesample(query,", "CASE WHEN non_events = 0 OR events = 0 THEN", "if isinstance(method, str): method = method.lower() check_types( [ (\"method\", method,", "\"tan\", \"tanh\", ], ), (\"x\", x, [int, float]), ] )", "\"int\"), TypeError( \"The column 'numcol' must be numerical\" ) cast", "of the input vColumns based on an aggregation. \"\"\" check_types(", "result.values[\"events\"] += [sum(result[\"events\"])] result.values[\"pt_non_events\"] += [\"\"] result.values[\"pt_events\"] += [\"\"] result.values[\"woe\"]", "offset < 0: offset = max(0, self.parent.shape()[0] - limit) title", "/ NULLIFZERO(pt_events))) END AS iv FROM ({}) x ORDER BY", "the vColumn using 'std' (Standard Deviation). Returns ------- float std", "if self.isdate(): return self.apply(func=\"TIMESTAMPADD(SECOND, {}, {})\".format(x, \"{}\")) else: return self.apply(func=\"{}", "str = \"\"): \"\"\" --------------------------------------------------------------------------- Replaces the regular expression matches", "\"0ifnull\", \"mean\", \"avg\", \"median\", \"ffill\", \"pad\", \"bfill\", \"backfill\", ], ),", ": kurtosis jb : Jarque-Bera index mad : median absolute", "# \\ \\ \\'/ \\ \\ \\____ \\ \\ \\/\\", "if some specific records are in the vColumn and it", "and as upper bound quantile(1-alpha) if 'use_threshold' is set to", "3)] sturges : Sturges [CEIL(log2(n)) + 1] Returns ------- float", "): self.parent, self.alias, self.transformations = ( parent, alias, [elem for", "== \"same_freq\": assert nbins >= 2, ParameterError( \"Parameter 'nbins' must", "{} must be binary to use iv_woe.\".format(y) ) self.parent[y].distinct() trans", "often they occur, and other statistical information. 
Parameters ---------- k:", "\"fd\"): best_h = fd else: best_h = max(sturges, fd) self.parent.__update_catalog__({\"index\":", "less frequent elements will be gathered together to create a", "( result[0], result[3], result[4], result[6], result[7], ) elif self.isdate(): min_date", "parameters ('prefix', 'prefix_sep'), you'll be able to solve this \"", "max_cardinality, h, ax=ax, **style_kwds, ) # ---# def std(self): \"\"\"", "having steps different than 1.\" ) index_stop = index.stop index_start", "and (pre_comp != None): return pre_comp assert n >= 1,", "{}, ord, non_events, events, non_events / NULLIFZERO(SUM(non_events) OVER ()) AS", "clip(self, lower=None, upper=None): \"\"\" --------------------------------------------------------------------------- Clips the vColumn by transforming", "\"(SELECT READ_TREE(USING PARAMETERS model_name = '{}', tree_id = {}, format", "# ---# def distinct(self, **kwargs): \"\"\" --------------------------------------------------------------------------- Returns the distinct", "if (elem[1] == None) else str(elem[1]) new_column = \"COALESCE({}, DECODE({},", "= self.max() else: xmin, xmax = xlim custom_lines = []", "smart : Uses the Random Forest on a response column", "vDataFrame[].decode : Encodes the vColumn with user defined Encoding. vDataFrame[].get_dummies", "= \"SELECT {} FROM (SELECT {}, ROW_NUMBER() OVER (ORDER BY", "parent = self.parent force_columns = [ column for column in", "the vColumn to the input type. Parameters ---------- dtype: str", "h: int/float/tuple, optional Interval width of the vColumns 1 and", "1: try: result = executeSQL( \"SELECT {}, MIN({}), MAX({}) FROM", "({})\".format(\"{}\", med, mad), \"float\", \"float\", ) ] else: warning_message =", "= \"START\" if (start) else \"END\" return self.apply( func=\"TIME_SLICE({}, {},", ": kth most occurent element (ex: top1 for the mode)", "self.ctype().lower() in (\"bool\", \"boolean\") # ---# def isdate(self): \"\"\" ---------------------------------------------------------------------------", "= self.aggregate([\"mad\", \"approx_median\"]).values[self.alias] mad *= 1.4826 if mad != 0:", "---# def var(self): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'var'", "if index_stop < 0: index_stop += self.parent.shape()[0] limit = index_stop", ": conditional value at risk dtype : vColumn type iqr", "or (numcol), ParameterError( \"The parameter 'numcol' must be a vDataFrame", "Parameters ---------- by: str, optional vColumn to use to partition", "Replaces the outliers by the NULL value. winsorize : Clips", "interval will be computed. nbins: int, optional Number of bins", "encoding of a response. \"\"\" check_types( [ (\"prefix\", prefix, [str]),", "query=query, title=\"Computing the distinct categories of {}.\".format(self.alias), method=\"fetchall\", ) return", "total = self.count() if method not in [\"mode\", \"0ifnull\"]: max_floor", "vColumn by the input element. Parameters ---------- x: float Input", "example to apply the function: x -> x^2 + 2", "transformation used instead of the parent vDataFrame. This parameter is", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "following: date / int / float / text / binary", ") query_result = executeSQL( query=query, title=\"Computing the descriptive statistics of", "mean encoding of a response. \"\"\" check_types( [ (\"prefix\", prefix,", "the search. 
Parameters ---------- val: list List of the different", "h, [int, float]), (\"nbins\", nbins, [int, float]), ] ) if", "result[i - 1], result[i], result[i - 1], result[i] ) trans", "for elem in distinct ] ), bin_spatial_to_str(self.category()), new_category.replace(\"'\", \"''\"), ),", "by, [list]), (\"return_trans\", return_trans, [bool]), ] ) method = method.lower()", "Also -------- vDataFrame.expected_store_usage : Returns the vDataFrame expected store usage.", "Vertica, taking advantage Vertica’s # speed and built-in analytics and", "self.parent._VERTICAPY_VARIABLES_[\"columns\"] += [name] self.parent.__add_to_history__( \"[Add Copy]: A copy of the", "{}.\".format(self.alias), method=\"fetchall\", ) result = [distinct_count, self.count()] + [item[1] for", "\"rose\" else False if of: self.parent.are_namecols_in(of) of = self.parent.format_colnames(of) from", "\"\", max_cardinality: Union[int, tuple] = (6, 6), h: Union[int, float,", "NameError( f\"A vColumn has already the alias {name}.\\nBy changing the", "the x limits of the current axes. ax: Matplotlib axes", "vDataFrame input aggregations. \"\"\" return self.aggregate(func=[\"prod\"]).values[self.alias][0] prod = product #", "self.isnum() and method == \"same_freq\": assert nbins >= 2, ParameterError(", "func not in (\"log\", \"mod\", \"pow\", \"round\"): expr = \"{}({})\".format(func.upper(),", "Returns ------- str vColumn category. See Also -------- vDataFrame[].ctype :", "available only if the parameter 'by' is empty\\nIf you want", "See Also -------- vDataFrame[].slice : Slices the vColumn using a", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "def sem(self): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'sem' (standard", "vColumns categories. Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].decode", "def topk(self, k: int = -1, dropna: bool = True):", "0 ELSE ZEROIFNULL(LN(pt_non_events / NULLIFZERO(pt_events))) END AS woe, CASE WHEN", "stores all user transformations. If the vDataFrame represents the entire", "changing the parameter 'name', you'll be able to solve this", "tree_id = {}, format = 'tabular'))\".format( tmp_model_name, i ) for", "method: str = \"winsorize\", threshold: float = 4.0, use_threshold: bool", "iv FROM ({}) x ORDER BY ord\".format( self.alias, query, )", "THEN argv[2 * i + 1] ... END odd :", "/\\ \"-.\\ \\ # \\ \\ \\'/ \\ \\ \\____", "{} using the following mapping:{}\".format( self.alias, text_info ) ) return", "\"Describes the statics of {} partitioned by {}.\".format( numcol, self.alias", "= True if len(args) > 0: column = args[0] elif", "the lower bound itself and the values higher than the", "= \"(CASE {}{}ELSE {} END)\".format(lower_when, upper_when, \"{}\") self.apply(func=func) return self.parent", "case of discretization using the method 'smart'.\" ) self.parent.are_namecols_in(response) response", "Regular expression. Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].str_count", "method: str = \"auto\", h: float = 0, nbins: int", "Catalog where each key corresponds to an aggregation. 
vColumns will", "else \" DESC\" partition_by = ( \"PARTITION BY {}\".format( \",", "Returns ------- ax Matplotlib axes object See Also -------- vDataFrame.boxplot", "(isinstance(index_start, int)): index_start = 0 if index_start < 0: index_start", "store_usage = executeSQL( \"SELECT ZEROIFNULL(SUM(LENGTH({}::varchar))) FROM {}\".format( bin_spatial_to_str(self.category(), self.alias), self.parent.__genSQL__(),", "x\".format( self.alias, query, ) query = \"SELECT {} AS index,", "median) / (1.4826 * mad) minmax : Normalization using the", "vColumn. Parameters ---------- limit: int, optional Number of elements to", ") warnings.warn(warning_message, Warning) return self elif method == \"minmax\": if", "\"donut\" else False rose = True if pie_type == \"rose\"", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "2 in case of discretization using the method 'same_freq'\" )", "optional If set to True, NULL values will not be", "check_types([(\"new_name\", new_name, [str])]) old_name = quote_ident(self.alias) new_name = new_name.replace('\"', \"\")", "Returns ------- bool True if the vColumn category is date.", "return self.head(limit=verticapy.options[\"max_rows\"])._repr_html_() # ---# def __setattr__(self, attr, val): self.__dict__[attr] =", "- min) by: list, optional vColumns used in the partition.", "The vColumn will be transformed. Parameters ---------- pat: str regular", "to the dependent variable. Parameters ---------- y: str Response vColumn.", "data. count : Number of elements. density : Percentage of", "): \"\"\" --------------------------------------------------------------------------- Draws the Time Series of the vColumn.", "empty, names will be generated. include_lowest: bool, optional If set", "quote_ident(name.replace('\"', \"_\")) assert name.replace('\"', \"\"), EmptyParameter( \"The parameter 'name' must", "[list])]) val = {self.alias: val} return self.parent.isin(val) # ---# def", "-------- vDataFrame[].isdate : Returns True if the vColumn category is", "\"The parameter 'numcol' must be a vDataFrame column if the", "AS 'approx_75%', APPROXIMATE_PERCENTILE ({3}{4} USING PARAMETERS percentile = 0.9) AS", "== \"sturges\": best_h = sturges elif method.lower() in (\"freedman_diaconis\", \"fd\"):", "convert the vColumn. If this parameter is equal to 0,", "[str]), (\"method\", method, [str]), (\"of\", of, [str]), (\"max_cardinality\", max_cardinality, [list]),", "{}\".format( self.alias, len(all_new_features), conj, \", \".join(all_new_features) ) + \".\" )", "\"POWER({}, 2) + 2\". copy_name: str, optional If not empty,", "* FROM {} WHERE {} IS NOT NULL ORDER BY", "for processing, VerticaPy brings the logic to the data. #", "a new feature by evaluating some conditions. vDataFrame[].discretize : Discretizes", "# ---# def sum(self): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using", "vColumn expected store usage (unit: b). Returns ------- int vColumn", "!\") return self.apply(func=\"{} / ({})\".format(\"{}\", x)) # ---# def drop(self,", "= 30): \"\"\" --------------------------------------------------------------------------- Returns the k most occurent elements,", ": maximum mean : average median : median min :", "# ---# def clip(self, lower=None, upper=None): \"\"\" --------------------------------------------------------------------------- Clips the", "using an advanced analytical function on a specific vColumn. \"\"\"", "(only if the vColumn type is date like). 
The vColumn", "to be used as categorical. The less frequent elements will", "vColumn is boolean. vDataFrame[].isnum : Returns True if the vColumn", ": CASE ... WHEN vColumn = argv[2 * i] THEN", "of expressions. The expression generated will look like: even: CASE", "True, ): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'median'. Parameters", "its affiliates. # Licensed under the Apache License, Version 2.0", "for elem in transformations], ) self.catalog = { \"cov\": {},", "max_floor = len(self.parent[elem].transformations) max_floor -= len(self.transformations) for k in range(max_floor):", "a vColumn means simply not selecting it in the final", ") ) return self.parent except Exception as e: raise ConversionError(", "BY {} ASC LIMIT {}\".format( self.parent.__genSQL__(), self.alias, self.alias, n )", "/ 3.0), 1e-99) if method.lower() == \"sturges\": best_h = sturges", "- total) except Exception as e: self.transformations = [elem for", "---# def min(self): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'min'", "(\"right\", right, [bool]), ] ) assert self.isnum() or self.isdate(), TypeError(", "[(func, ctype, category)] self.parent[copy_name].catalog = self.catalog self.parent.__add_to_history__( \"[Apply]: The vColumn", "\"\\n\" for k in range(len(distinct_elements)): expr += [ \"'{}', {}\".format(str(distinct_elements[k]).replace(\"'\",", "the vColumn is numerical , 'categorical' otherwise. categorical : Uses", "if the absolute value of the record is greater than", "\"If the method is in ffill|pad|bfill|backfill then 'order_by' must be", "self.catalog[elem] self.parent.__update_catalog__(erase=True, columns=[self.alias]) total = abs(self.count() - total) except Exception", "new_column = \"COALESCE({}, {}({}) OVER (PARTITION BY {}))\".format( \"{}\", fun,", "the discretization. topk : Keeps the topk most frequent categories", "self.category())] self.transformations += [(new_column, ctype, category)] try: sauv = {}", "Returns ------- int vColumn cardinality (or approximate cardinality). See Also", "2 quantiles used to draw the Plot. start_date: str /", "the category of the vColumn. The category will be one", "== \"winsorize\": self.clip(lower=p_alpha, upper=p_1_alpha) elif method == \"null\": self.apply( func=\"(CASE", "rose = True if pie_type == \"rose\" else False if", "\"_\") ) n = 1 if drop_first else 0 for", "of: self.parent.are_namecols_in(of) of = self.parent.format_colnames(of) from verticapy.plot import spider as", "ORDER BY COUNT(*)\" \" DESC OFFSET {1}) VERTICAPY_SUBTABLE) ORDER BY", "self.parent[copy_name].catalog = self.catalog self.parent.__add_to_history__( \"[Apply]: The vColumn '{}' was transformed", "\"Length of parameter 'breaks' must be greater or equal to", "for elem in all_partition: if len(self.parent[elem].transformations) > max_floor: max_floor =", "[ f\"'{first_elem}' {op1} {column} AND {column} {op2} '{second_elem}' THEN '{label}'\"", "be transformed. Parameters ---------- to_replace: str Regular expression to replace.", "the vColumn using the average of the response partitioned by", "by[0], ), title=\"Computing the different categories {} to normalize.\".format( by[0]", "\"\"\" --------------------------------------------------------------------------- Subtracts the input element from the vColumn. 
Parameters", "), print_time_sql=False, ) except: cmax, cmin = ( \"MAX({}) OVER", "/\\ \\/\\ \\ /\\ \"-./ \\ /\\ \"-.\\ \\ #", "largest vColumn elements. \"\"\" check_types([(\"n\", n, [int, float])]) query =", "available on type numeric|date\" ) if self.isnum(): result = (", "len(args) > 0: column = args[0] elif \"column\" in kwargs:", "area: bool = False, step: bool = False, ax=None, **style_kwds,", "else \"\" query = \"SELECT {} AS {}, COUNT(*) AS", "(byte) See Also -------- vDataFrame.memory_usage : Returns the vDataFrame memory", "vColumn. Parameters ---------- ts: str TS (Time Series) vColumn to", "the vColumn. The vColumn will be transformed. Parameters ---------- start:", "/ text / binary / spatial / uuid / undefined", "tangent x: int/float, optional If the function has two arguments", "Computes the vDataFrame input aggregations. \"\"\" check_types([(\"dropna\", dropna, [bool]), (\"n\",", "\"\"\" check_types([(\"x\", x, [int, float], (\"approx\", approx, [bool]))]) prefix =", "\".format(\"{}\", upper, upper) if (isinstance(upper, (float, int))) else \"\" )", "Returns the vDataFrame expected store usage. \"\"\" pre_comp = self.parent.__get_catalog_value__(self.alias,", "of the vColumn records. The vColumn will be transformed. Parameters", "x, [int, float])]) assert x != 0, ValueError(\"Division by 0", "the input method. Parameters ---------- method: str, optional The describe", "/ TIMEZONE_MINUTE / WEEK / YEAR Returns ------- vDataFrame self.parent", "discretize the vColumn. auto : Uses method 'same_width' for numerical", "h: float = 0, max_cardinality: int = 8, cat_priority: list", "store=False, ) try: result = model.fit(self.parent.__genSQL__(), [self.alias]).plot( ax=ax, **style_kwds )", "if self.isnum() else \"topk\", nbins=nbins, k=nbins, new_category=\"Others\", return_enum_trans=True, )[0].replace(\"{}\", self.alias)", "------- float vColumn memory usage (byte) See Also -------- vDataFrame.memory_usage", "self.catalog[\"mad\"] = 1 / 1.4826 elif method == \"zscore\": self.catalog[\"mean\"]", "float product See Also -------- vDataFrame.aggregate : Computes the vDataFrame", "regular width bins. smart : Uses the Random Forest on", "naming.\\nBy changing one of \" \"the parameters ('prefix', 'prefix_sep'), you'll", "0 and 1 (1 meaning global outlier). \"\"\" check_types( [", "(\"pie_type\", pie_type, [\"auto\", \"donut\", \"rose\"]), ] ) donut = True", "Value (IV) / Weight Of Evidence (WOE) Table. It tells", "children vColumns whereas one vColumn can only have one parent.", "threshold ) ) else: p_alpha, p_1_alpha = ( self.parent.quantile([alpha, 1", "= \"\"): \"\"\" --------------------------------------------------------------------------- Replaces the regular expression matches in", "decrease. Returns ------- int vColumn cardinality (or approximate cardinality). See", "number round : rounds a value to a specified number", ": Encodes the vColumn with Label Encoding. vDataFrame[].mean_encode : Encodes", "conducting # data science projects on data stored in Vertica,", "vColumn category is date. \"\"\" return self.category() in (\"float\", \"int\")", "n, [int, float])]) query = \"SELECT * FROM {} WHERE", "vColumns simplify several processes with its abstractions. Parameters ---------- alias:", "testing purpose. 
Returns ------- vDataFrame self.parent See Also -------- vDataFrame.outliers", "[1, 0]) or self.isbool(): all_new_features = [] prefix = (", "the following: CENTURY / DAY / DECADE / DOQ /", "/ # | \\/ / / / # |______ /", "'\"{}{}\"'.format( prefix, str(distinct_elements[k]).replace('\"', \"_\") ) ) assert not (self.parent.is_colname_in(name)), NameError(", "-------- vDataFrame.boxplot : Draws the Box Plot of the input", "\"NULLIFZERO\" if (nullifzero) else \"\", cmax, cmin, ) else: final_transformation", "maximum mean : average median : median min : minimum", "vColumn. auto : Uses method 'same_width' for numerical vColumns, cast", "Aggregates the vColumn using 'max' (Maximum). Returns ------- float/str maximum", "mul(self, x: float): \"\"\" --------------------------------------------------------------------------- Multiplies the vColumn by the", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "< int(float(count / int(nbins))) * int(nbins): nth_elems += [str(total)] total", "self, by: str = \"\", bandwidth: float = 1.0, kernel:", "Evaluates a customized expression. \"\"\" check_types([(\"name\", name, [str])]) name =", "Drops outliers in the vColumn. Parameters ---------- threshold: float, optional", "y) result = to_tablesample(query, title=title) result.values[\"index\"] += [\"total\"] result.values[\"non_events\"] +=", "cmin = \"DECODE({}, {}, NULL)\".format( by[0], \", \".join( [ \"{},", "will be computed if the parameter is empty or invalid.", "the vColumn using as lower bound quantile(alpha) and as upper", "trigonometric tangent tanh : hyperbolic tangent x: int/float, optional If", "the n largest vColumn elements. Parameters ---------- n: int, optional", "{}) / {}({})\".format( \"{}\", avg, \"NULLIFZERO\" if (nullifzero) else \"\",", "result = executeSQL( query=query, title=\"Computing the optimized histogram nbins using", "\"regr_count\": {}, \"regr_intercept\": {}, \"regr_r2\": {}, \"regr_slope\": {}, \"regr_sxx\": {},", "|| ''\", \"varchar\", \"text\") else: trans = (\"{} || ''\",", "the Type casting.\") self.transformations += [ ( \"{}::{}\".format(\"{}\", dtype), dtype,", "[(new_column, ctype, category)] try: sauv = {} for elem in", "vdf_table AS (SELECT * FROM {}) {}\".format( self.parent.__genSQL__(), query )", "len(self.transformations) for k in range(max_floor): self.transformations += [(\"{}\", self.ctype(), self.category())]", "= \"SELECT split_value FROM (SELECT split_value, MAX(weighted_information_gain) FROM ({}) VERTICAPY_SUBTABLE", "\"\"\" if isinstance(func, str_sql): func = str(func) check_types([(\"func\", func, [str]),", "vColumn with user defined Encoding. vDataFrame[].get_dummies : Encodes the vColumn", "[self.alias], response) query = [ \"(SELECT READ_TREE(USING PARAMETERS model_name =", "add_history: bool, optional If set to True, the information will", "(\"ts\", ts, [str]), (\"q\", q, [tuple]), ( \"start_date\", start_date, [str,", "The catalog will be updated when the parent vDataFrame is", "return self.aggregate([\"avg\"]).values[self.alias][0] mean = avg # ---# def bar( self,", "the new categories. If empty, names will be generated. include_lowest:", "list List of values used to cut the vColumn. labels:", "max_cardinality: int, optional Maximum number of the vColumn distinct elements", "= self.discretize( method=\"same_width\" if self.isnum() else \"topk\", nbins=nbins, k=nbins, new_category=\"Others\",", "method, of, max_cardinality, nbins, h, ax=ax, **style_kwds) # ---# def", "a response. 
\"\"\" check_types( [ (\"RFmodel_params\", RFmodel_params, [dict]), (\"return_enum_trans\", return_enum_trans,", "by, h, max_cardinality, cat_priority, ax=ax, **style_kwds) # ---# def category(self):", "colors[idx % len(colors)]} ax = self.parent.search( \"{} = '{}'\".format(self.parent[by].alias, column)", "drop(tmp_model_name, method=\"model\") raise drop(tmp_view_name, method=\"view\") drop(tmp_model_name, method=\"model\") result = [self.min()]", "Frequency discretization\" ) total, query, nth_elems = nb, [], []", "verticapy.options[\"temp_schema\"] if not (schema): schema = \"public\" name = gen_tmp_name(schema=schema,", "pt_non_events, pt_events, CASE WHEN non_events = 0 OR events =", "cat_priority: list, optional List of the different categories to consider", "return self.parent # ---# def median( self, approx: bool =", "= [ ( \"({} - {}) / {}({} - {})\".format(", "mode : mode (most occurent element). 0ifnull : 0 when", "the data. # # # Modules # # Standard Python", "type is date like (date, datetime ...), the parameter 'x'", "of digits after the comma. Parameters ---------- n: int Number", "nbins: int = 200, xlim: tuple = None, ax=None, **style_kwds,", "Parameters ---------- limit: int, optional Number of elements to display.", "the vColumn category is date, False otherwise. Returns ------- bool", "Plot. ax: Matplotlib axes object, optional The axes to plot", "methods. Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].dropna :", "self.catalog[elem] self.parent.__update_catalog__(erase=True, columns=[self.alias]) try: if \"count\" in sauv: self.catalog[\"count\"] =", "| \\/ / / / # |______ / / /", "\"minmax\": self.catalog[elem] = (sauv[elem] - sauv[\"min\"]) / ( sauv[\"max\"] -", "the lower bound to the lower bound itself and the", "h will be picked or computed) h: float, optional Interval", "<= 0: h = self.numh() else: h = (self.max() -", "power of an independent variable in relation to the dependent", "the Bar Chart of vColumn based on an aggregation. \"\"\"", "mean, STDDEV({3}{4}) AS std, MIN({3}{4}) AS min, APPROXIMATE_PERCENTILE ({3}{4} USING", "list = [], ): \"\"\" --------------------------------------------------------------------------- Fills missing elements in", "[int, float]), (\"offset\", offset, [int, float])]) if offset < 0:", "silverman : Silverman kernel. nbins: int, optional Maximum number of", "True, the approximate median is returned. By setting this parameter", "For more information, see: https://geopandas.readthedocs.io/en/latest/docs/reference/api/ geopandas.GeoDataFrame.plot.html Returns ------- ax Matplotlib", "+= [sum(result[\"iv\"])] return result # ---# def kurtosis(self): \"\"\" ---------------------------------------------------------------------------", "{} IN ({}) THEN {} || '' ELSE '{}' END)\".format(", "Returns the n smallest elements in the vColumn. \"\"\" check_types([(\"n\",", "function's performance can drastically decrease. Returns ------- float quantile (or", "and two input quantiles. Parameters ---------- ts: str TS (Time", "FROM (SELECT COUNT(*) AS count\" \" FROM vdf_table WHERE {0}", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", ": standard deviation topk : kth most occurent element (ex:", "If set to True, draw a Step Plot. ax: Matplotlib", "---# def date_part(self, field: str): \"\"\" --------------------------------------------------------------------------- Extracts a specific", "vColumn. 
\"\"\" check_types([(\"new_name\", new_name, [str])]) old_name = quote_ident(self.alias) new_name =", "pat.replace(\"'\", \"''\")) ) # ---# def str_count(self, pat: str): \"\"\"", "TypeError( \"cut only works on numerical / date-like vColumns.\" )", "{} AS {}, COUNT(*) AS _verticapy_cnt_, 100 * COUNT(*) /", "Encoding algorithm. Parameters ---------- prefix: str, optional Prefix of the", "time series rule. Parameters ---------- length: int Slice size. unit:", "assert self.isnum() or self.isdate(), ParameterError( \"numh is only available on", "or (self.category() == \"float\"): trans = ( \"'[' || FLOOR({}", "Returns ------- vDataFrame self.parent See Also -------- vDataFrame.apply : Applies", "/ MINUTE / MONTH / QUARTER / SECOND / TIME", "ORDER BY _verticapy_cnt_ DESC LIMIT {}) VERTICAPY_SUBTABLE ORDER BY _verticapy_cnt_", "] ) donut = True if pie_type == \"donut\" else", "+ result[\"avg\"][0], threshold * result[\"std\"][0] + result[\"avg\"][0], ) else: query", "self.alias ) warnings.warn(warning_message, Warning) return self elif method == \"minmax\":", ") query = \"SELECT COUNT({}) AS NAs, MIN({}) AS min,", "aggregations. \"\"\" check_types( [ (\"method\", method, [\"auto\", \"numerical\", \"categorical\", \"cat_stats\"]),", "method=\"model\") if self.parent[response].category() == \"float\": model = RandomForestRegressor(tmp_model_name) else: model", "\"sigmoid\", \"silverman\"]), (\"bandwidth\", bandwidth, [int, float]), (\"nbins\", nbins, [float, int]),", "\"smart\" ): n = len(result) trans = \"(CASE \" for", "a default function to the vColumn. Parameters ---------- func: str", "method=\"fetchall\", ) for i in range(len(result)): if result[i][2] == None:", "optional Slice size unit. For example, it can be 'minute'", "limit = index_stop - index_start if limit <= 0: limit", "0.05 ): \"\"\" --------------------------------------------------------------------------- Drops outliers in the vColumn. Parameters", "float, optional Upper bound. Returns ------- vDataFrame self.parent See Also", "axes object See Also -------- vDataFrame[].bar : Draws the Bar", "\" self.parent.__add_to_history__( \"[Get Dummies]: One hot encoder was applied to", "self.catalog[\"percent\"] = ( 100 * sauv[\"count\"] / self.parent.shape()[0] ) for", "None avg = \"DECODE({}, {}, NULL)\".format( by[0], \", \".join( [", "int = 10): \"\"\" --------------------------------------------------------------------------- Returns the n smallest elements", "method. \"\"\" check_types([(\"lower\", lower, [float, int]), (\"upper\", upper, [float, int])])", "self.parent.shape()[0], \"percent\": 100.0, \"unique\": 2, \"approx_unique\": 2, \"prod\": 0, },", "= self.parent.format_colnames(by) from verticapy.plot import gen_colors from matplotlib.lines import Line2D", "If set to True, draw an Area Plot. step: bool,", "ax=ax, **style_kwds) # ---# def boxplot( self, by: str =", "when 'method' is set to 'smart'. A RF Regressor will", "\"regr_syy\": {}, } for elem in catalog: self.catalog[elem] = catalog[elem]", "means simply not selecting it in the final generated SQL", "(\"nbins\", nbins, [int, float]), (\"h\", h, [int, float]), ] )", "(self.parent[by[0]].nunique() < 50): try: result = executeSQL( \"SELECT {}, AVG({}),", "can drastically decrease. 
Returns ------- int vColumn cardinality (or approximate", "(None, None), ax=None, **style_kwds, ): \"\"\" --------------------------------------------------------------------------- Draws the spider", "\"\"\" if isinstance(val, str) or not (isinstance(val, Iterable)): val =", "\\_____\\ \\ \\_____\\ \\ \\_\\ \\ \\_\\ \\ \\_\\\\\"\\_\\ #", "get the median). It can also be a cutomized aggregation", "---------- by: str, optional vColumn to use to partition the", "self.parent.__get_last_order_by__(), index_start, limit, ) return vDataFrameSQL(query) elif isinstance(index, int): cast", "func: str Function to use to transform the vColumn. abs", "|| ';' || (FLOOR({} / {}) * {} + {}{})", ": value down to the next whole number ln :", "return self.apply( func=\"REGEXP_COUNT({}, '{}')\".format(\"{}\", pat.replace(\"'\", \"''\")) ) # ---# def", "OFFSET {}{}) VERTICAPY_SUBTABLE\".format( self.alias, self.parent.__genSQL__(), self.parent.__get_last_order_by__(), index_start, limit, ) return", "vColumns to the input type. \"\"\" check_types([(\"dtype\", dtype, [str])]) try:", "other into one unique category. h: float, optional The interval", "range(len(breaks) - 1): first_elem, second_elem = breaks[idx], breaks[idx + 1]", "\"COALESCE({}, '{}')\".format(\"{}\", val) elif expr: new_column = \"COALESCE({}, {})\".format(\"{}\", expr)", "[int, float])]) return self.apply(func=\"{} * ({})\".format(\"{}\", x)) # ---# def", "of the kernel. kernel: str, optional The method used for", "|| '' ELSE '{}' END)\".format( bin_spatial_to_str(self.category()), \", \".join( [ \"'{}'\".format(str(elem).replace(\"'\",", "if not (\"legend\" in kwargs): kwargs[\"legend\"] = True if not", "/ MILLISECONDS / MINUTE / MONTH / QUARTER / SECOND", "bins. If empty, an optimized number of bins will be", "\"\" query, cat = [], self.distinct() if len(cat) == 1:", "quantile is returned. By setting this parameter to False, the", "== 2, TypeError( \"vColumn {} must be binary to use", "(6, 6), h: Union[int, float, tuple] = (None, None), ax=None,", "e: raise QueryError( \"{}\\nError when applying the func 'x ->", "if the vColumn is boolean. vDataFrame[].isnum : Returns True if", "not in [\"mode\", \"0ifnull\"]: max_floor = 0 all_partition = by", "vDataFrame.aggregate : Computes the vDataFrame input aggregations. \"\"\" return self.quantile(0.5,", "sauv[\"count\"] / self.parent.shape()[0] ) except: pass self.parent.__add_to_history__( \"[Discretize]: The vColumn", "will filter the data when 'ts' is greater than November", "/ # \\ / / / # \\ / /", "mean_1_alpha == None: mean_alpha = \"NULL\" self.apply( func=\"(CASE WHEN {}", "[bool]), ] ) assert self.isnum() or self.isdate(), TypeError( \"cut only", "result[\"avg\"][0], threshold * result[\"std\"][0] + result[\"avg\"][0], ) else: query =", ": average absolute deviation approx_unique : approximative cardinality count :", "the different categories to consider when drawing the box plot.", "------- str/float/int vColumn nth most occurent element. See Also --------", "optional New value. 
Returns ------- vDataFrame self.parent See Also --------", "nbins=nbins, xlim=(xmin, xmax), ax=ax, **updated_dict(param, style_kwds, idx), ) custom_lines +=", "Also -------- vDataFrame[].apply : Applies a function to the input", "distinct(self, **kwargs): \"\"\" --------------------------------------------------------------------------- Returns the distinct categories of the", "the mean skewness : skewness sum : sum std :", "= \"MEDIAN\" if (method == \"median\") else \"AVG\" if by", "to True, the method will return the transformation used instead", "numerical vColumns, cast the other types to varchar. same_freq :", "(FLOOR({} / {}) * {} + {}{}) || ']'\".format( \"{}\",", "y, [str]), (\"nbins\", nbins, [int])]) self.parent.are_namecols_in(y) y = self.parent.format_colnames(y) assert", "def rename(self, new_name: str): \"\"\" --------------------------------------------------------------------------- Renames the vColumn by", "the func 'x -> {}'.\".format( copy_name.replace('\"', \"\"), func.replace(\"{}\", \"x\"), )", "parameter 'by' is empty\\nIf you want to normalize by grouping", "index mad : median absolute deviation max : maximum mean", "used to name the new categories. If empty, names will", "1 and 2 to be used as categorical (No h", "(float, int))) else \"\" ) upper_when = ( \"WHEN {}", "\\/_____/ \\/_____/ \\/_/ \\/_/ \\/_/ \\/_/ # # # ---#", "self.transformations += [(\"{}\", self.ctype(), self.category())] self.transformations += final_transformation sauv =", ": rounds a value to a specified number of decimal", "(\"method\", method, [str]), (\"of\", of, [str]), (\"max_cardinality\", max_cardinality, [list]), (\"h\",", "= self.ctype() tail.name = self.alias return tail # ---# def", "= \"Describes the statics of {} partitioned by {}.\".format( numcol,", "than the threshold, it will be considered as an outlier.", "FROM (SELECT {}, ROW_NUMBER() OVER (ORDER BY {}) AS _verticapy_row_nb_", "= index.start if not (isinstance(index_start, int)): index_start = 0 if", "the parent vDataFrame is modified. Attributes ---------- alias, str :", "or computed) nbins: int, optional Number of bins. If empty,", "--------------------------------------------------------------------------- Aggregates the vColumn using an input 'quantile'. Parameters ----------", "mod : remainder of a division operation pow : number", "approximate cardinality is returned. By setting this parameter to False,", "\"\"\" --------------------------------------------------------------------------- Python object which that stores all user transformations.", "where, ) result = executeSQL( query=query, title=\"Computing the equal frequency", "x^2 + 2 use \"POWER({}, 2) + 2\". copy_name: str,", "\\'/ \\ \\ \\____ \\ \\ \\/\\ \\ \\ \\", "of the list will be included. right: bool, optional How", "--------------------------------------------------------------------------- Extracts a specific TS field from the vColumn (only", "vColumn means simply not selecting it in the final generated", "new_category=\"Others\", return_enum_trans=True, )[0].replace(\"{}\", self.alias) query = \"SELECT {} AS {},", "using 'skewness'. 
Returns ------- float skewness See Also -------- vDataFrame.aggregate", "str, value: str = \"\"): \"\"\" --------------------------------------------------------------------------- Replaces the regular", "(SELECT {}, COUNT(*) AS _verticapy_cnt_ FROM {}{}GROUP BY {} ORDER", "Sturges [CEIL(log2(n)) + 1] Returns ------- float optimal bar width.", "Values lesser than quantile(alpha) or greater than quantile(1-alpha) will be", "self.alias, self.alias, self.parent.__genSQL__(), by[0], ), title=\"Computing the different categories {}", "---# def str_replace(self, to_replace: str, value: str = \"\"): \"\"\"", "the vColumn. \"\"\" check_types([(\"new_name\", new_name, [str])]) old_name = quote_ident(self.alias) new_name", "utilities.tablesample. See Also -------- vDataFrame[].nsmallest : Returns the n smallest", "geo_plot(self, *args, **kwargs): \"\"\" --------------------------------------------------------------------------- Draws the Geospatial object. Parameters", "are the median and two input quantiles. Parameters ---------- ts:", "when the parameter method is set to 'cat_stats'. Returns -------", "executeSQL( query=query, title=\"Getting the vColumn element.\", method=\"fetchfirstelem\", ) else: return", "max_floor = max( len(self.parent[column].transformations), max_floor ) except: pass max_floor -=", "return self.aggregate([\"aad\"]).values[self.alias][0] # ---# def abs(self): \"\"\" --------------------------------------------------------------------------- Applies the", "'name', you'll be able to solve this issue.\" ) new_vColumn", "parameter. alpha: float, optional Number representing the outliers threshold. Values", "(x - min) / (max - min) by: list, optional", "optimal h.\", method=\"fetchrow\", ) count, vColumn_min, vColumn_025, vColumn_075, vColumn_max =", "re.compile(\"\\\\b{}\\\\b\".format(column.replace('\"', \"\"))), func ) ): max_floor = max( len(self.parent[column].transformations), max_floor", "elif math.isnan(result[i][2]): result[i][2] = None avg = \"DECODE({}, {}, NULL)\".format(", "Also -------- vDataFrame[].head : Returns the head of the vColumn.", "{}({})\".format( \"{}\", avg, \"NULLIFZERO\" if (nullifzero) else \"\", stddev ),", "transformed. Parameters ---------- start: int Start of the slicing. step:", ": Draws the donut chart of the vColumn based on", "vDataFrame self.parent See Also -------- vDataFrame.add_copy : Creates a copy", "len(self.transformations) if copy_name: self.add_copy(name=copy_name) for k in range(max_floor): self.parent[copy_name].transformations +=", "cutomized aggregation (ex: AVG(column1) + 5). of: str, optional The", "by = self.parent.format_colnames(by) nullifzero, n = 1, len(by) if self.isbool():", "= \"auto\", h: float = 0, nbins: int = -1,", "\"Reads {} {} smallest elements.\".format(n, self.alias) return to_tablesample(query, title=title) #", "---------- to_replace: str Regular expression to replace. value: str, optional", "\"-./ \\ /\\ \"-.\\ \\ # \\ \\ \\'/ \\", "def kurtosis(self): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'kurtosis'. Returns", "vDataFrame[].describe : Computes the vColumn descriptive statistics. \"\"\" check_types([(\"k\", k,", "return self.ctype().lower() in (\"bool\", \"boolean\") # ---# def isdate(self): \"\"\"", "Response vColumn. 
nbins: int, optional Maximum number of nbins used", "# ---# class vColumn(str_sql): \"\"\" --------------------------------------------------------------------------- Python object which that", "when using the 'topk' method. RFmodel_params: dict, optional Dictionary of", "= \"mean\" if (self.isnum() and self.nunique(True) > 6) else \"mode\"", "\"{}\", by[0], \", \".join( [\"{}, {}\".format(elem[0], elem[1]) for elem in", "med, mad) else: final_transformation = [ ( \"({} - {})", "self.isnum(): result = ( self.parent.describe( method=\"numerical\", columns=[self.alias], unique=False ) .transpose()", "each record of the vColumn. vDataFrame[].str_slice : Slices the vColumn.", "parameter 'name' must not be empty\" ) assert not (self.parent.is_colname_in(name)),", "[int, float], (\"approx\", approx, [bool]))]) prefix = \"approx_\" if approx", "to True, the approximate median is returned. By setting this", ": Returns True if the vColumn is numerical. \"\"\" return", "The vColumn {} was converted to {}.\".format( self.alias, dtype )", "want to normalize by grouping by elements, please use a", "\\_____\\ \\ \\_\\ \\ \\_\\ \\ \\_\\\\\"\\_\\ # \\/_/ \\/_____/", "of a response. \"\"\" check_types( [ (\"prefix\", prefix, [str]), (\"prefix_sep\",", "return self.parent # ---# def mad(self): \"\"\" --------------------------------------------------------------------------- Aggregates the", "in each record of the vColumn. vDataFrame[].str_replace : Replaces the", "projects on data stored in Vertica, taking advantage Vertica’s #", "item in query_result] else: result = ( self.parent.describe( method=\"numerical\", columns=[self.alias],", "return tablesample(values) # ---# def value_counts(self, k: int = 30):", "upper bound to the upper bound itself. Parameters ---------- lower:", "parameter method is set to 'cat_stats'. Returns ------- tablesample An", "use_threshold: bool, optional Uses the threshold instead of the 'alpha'", "\", \".join( [\"{}, {}\".format(elem[0], elem[1]) for elem in result] ),", "{}\".format(str(distinct_elements[k]).replace(\"'\", \"''\"), k) ] text_info += \"\\t{} => {}\".format(distinct_elements[k], k)", "\"'[' || FLOOR({} / {}) * {} || ';' ||", "# ---# def aggregate(self, func: list): \"\"\" --------------------------------------------------------------------------- Aggregates the", "will be computed. h: float, optional Interval width of the", "input element from the vColumn. Parameters ---------- x: float If", "the computation. max_cardinality: int, optional Cardinality threshold to use to", "using 'std' (Standard Deviation). Returns ------- float std See Also", "the median and two input quantiles. Parameters ---------- ts: str", "\"\", self.alias ), method=\"fetchall\", ) values = { \"index\": [item[0]", "For more information, see utilities.tablesample. See Also -------- vDataFrame[].tail :", "self.transformations += [ (\"AVG({}) OVER (PARTITION BY {})\".format(response, \"{}\"), \"int\",", "0 OR events = 0 THEN 0 ELSE ZEROIFNULL(LN(pt_non_events /", "int = 5): \"\"\" --------------------------------------------------------------------------- Returns the head of the", "\"''\")) ) result[idx][1] = \"NULL\" if (elem[1] == None) else", "n = 1 then this method will return the mode", "{} THEN {} WHEN {} > {} THEN {} ELSE", "[str])]) self.parent.are_namecols_in(response) response = self.parent.format_colnames(response) assert self.parent[response].isnum(), TypeError( \"The response", "in compliance with the License. 
# You may obtain a", "= ( -threshold * result[\"std\"][0] + result[\"avg\"][0], threshold * result[\"std\"][0]", "copy_name, [str])]) try: try: ctype = get_data_types( \"SELECT {} AS", "category is date. \"\"\" return self.category() in (\"float\", \"int\") #", "--------------------------------------------------------------------------- Aggregates the vColumn using 'kurtosis'. Returns ------- float kurtosis", "table ) result = executeSQL( query, title=\"Different aggregations to compute", "vDataFrame[].mean_encode : Encodes the vColumn using the mean encoding of", "= model.get_params() try: model.fit(tmp_view_name, [self.alias], response) query = [ \"(SELECT", "--------------------------------------------------------------------------- Returns a part of the vColumn (delimited by an", "{} FROM {}{} LIMIT {} OFFSET {}\".format( bin_spatial_to_str(self.category(), self.alias), self.alias,", "if use_threshold: result = self.aggregate(func=[\"std\", \"avg\"]).transpose().values p_alpha, p_1_alpha = (", "alias {new_name}.\\nBy changing the parameter 'new_name', you'll be able to", "None), ax=None, **style_kwds, ): \"\"\" --------------------------------------------------------------------------- Draws the spider plot", "to 'cat_stats'. Returns ------- tablesample An object containing the result.", "the vDataFrame \"heavier\" if it is used to compute other", "chart. donut : Donut chart. rose : Rose chart. It", "Returns ------- bool True if the vColumn is boolean. See", "floor : value down to the next whole number ln", "affiliates. # Licensed under the Apache License, Version 2.0 (the", "= '03-11-1993' will filter the data when 'ts' is lesser", "transformation operations, and offers beautiful graphical options. # # VerticaPy", "else: check = False if check: self.parent.are_namecols_in(column) column = self.parent.format_colnames(column)", "---# def max(self): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'max'", "------- float product See Also -------- vDataFrame.aggregate : Computes the", "with user defined Encoding. vDataFrame[].discretize : Discretizes the vColumn. vDataFrame[].label_encode", "slicing instead of the ceiling. Returns ------- vDataFrame self.parent See", "prefix_sep.replace('\"', \"_\") if not (prefix) else prefix.replace('\"', \"_\") + prefix_sep.replace('\"',", "practice to use this method when first preparing your data.", "\"auto\", expr: str = \"\", by: list = [], order_by:", "the vDataFrame input aggregations. \"\"\" return self.aggregate([\"avg\"]).values[self.alias][0] mean = avg", "See Also -------- vDataFrame[].drop_outliers : Drops outliers in the vColumn.", "self.category() in (\"float\", \"int\") # ---# def iv_woe(self, y: str,", "self.parent._VERTICAPY_VARIABLES_[\"columns\"] += [name] all_new_features += [name] conj = \"s were", "), method=\"fetchall\", ) values = { \"index\": [item[0] for item", "regular expression in each record of the vColumn. vDataFrame[].extract :", "SUM(1 - {}) AS non_events, SUM({}) AS events FROM ({})", "object See Also -------- vDataFrame.boxplot : Draws the Box Plot", "\"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'skewness'. 
Returns ------- float", "100}) model.set_params(RFmodel_params) parameters = model.get_params() try: model.fit(tmp_view_name, [self.alias], response) query", "please use a method in zscore|minmax\" warnings.warn(warning_message, Warning) return self", ": Applies a function to the input vColumn. \"\"\" check_types(", "{})\".format( self.alias, \", \".join(by) ), ) if return_trans: return \"({}", "str vColumn alias. transformations: list, optional List of the different", "date. vDataFrame[].isnum : Returns True if the vColumn is numerical.", "but will also increase the time of the learning and", "{}) AS non_events, SUM({}) AS events FROM ({}) x GROUP", "{}) VERTICAPY_SUBTABLE ORDER BY _verticapy_cnt_ ASC LIMIT 1\".format( self.alias, self.alias,", "/ {}({} - {})\".format( self.alias, cmin, \"NULLIFZERO\" if (nullifzero) else", "dtype: str New type. Returns ------- vDataFrame self.parent See Also", "method, [\"auto\", \"numerical\", \"categorical\", \"cat_stats\"]), (\"max_cardinality\", max_cardinality, [int, float]), (\"numcol\",", "threshold it will be considered as an outlier. use_threshold: bool,", "(is_numeric) or (method == \"categorical\") ): query = \"\"\"(SELECT {0}", "({})\".format(\"{}\", x)) # ---# def add_copy(self, name: str): \"\"\" ---------------------------------------------------------------------------", "most occurent elements and their distributions as percents. Parameters ----------", "< 0: index += self.parent.shape()[0] query = \"SELECT {}{} FROM", "datetime from collections.abc import Iterable from typing import Union #", "category is date, False otherwise. Returns ------- bool True if", "be picked or computed) nbins: int, optional Number of nbins.", "\"abs\", \"acos\", \"asin\", \"atan\", \"cbrt\", \"ceil\", \"cos\", \"cosh\", \"cot\", \"exp\",", "[self.min()] + result + [self.max()] elif method == \"topk\": assert", "[best_h]}) if self.category() == \"int\": best_h = max(math.floor(best_h), 1) return", "the vColumn using 'unique' (cardinality). Parameters ---------- approx: bool, optional", "# data science projects on data stored in Vertica, taking", "as one column of that relation. vColumns simplify several processes", "= 0 OR events = 0 THEN 0 ELSE (pt_non_events", "record of the vColumn. vDataFrame[].extract : Extracts the regular expression", "if by: self.parent.are_namecols_in(by) by = self.parent.format_colnames(by) from verticapy.plot import boxplot", "input vColumn. Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].apply", "breaks: list List of values used to cut the vColumn.", "cast the other types to varchar. same_freq : Computes bins", "title=\"Computing the quantiles of {}.\".format(self.alias), method=\"fetchrow\", ) if method ==", "with the License. # You may obtain a copy of", "close_r = \"<=\", \"<\", \"[\", \"[\" if idx == 0", "[] columns = self.parent[by].distinct() for idx, column in enumerate(columns): param", "= get_dummies # ---# def head(self, limit: int = 5):", "data when 'ts' is greater than November 1993 the 3rd.", "self.ctype(), self.category()) ] self.parent[copy_name].transformations += [(func, ctype, category)] self.parent[copy_name].catalog =", "\"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'sem' (standard error of", "in pure SQL used to transform the vColumn. 
The function", "expr = \"DECODE({}, '{}', 1, 0)\".format( \"{}\", str(distinct_elements[k]).replace(\"'\", \"''\") )", "\"[Discretize]: The vColumn {} was discretized.\".format(self.alias) ) return self.parent #", "if the vColumn category is date. See Also -------- vDataFrame[].isbool", "= \"gaussian\", nbins: int = 200, xlim: tuple = None,", "the vDataFrame input aggregations. \"\"\" return self.aggregate([\"max\"]).values[self.alias][0] # ---# def", "= self.parent.__get_catalog_value__(self.alias, \"top\") if pre_comp != \"VERTICAPY_NOT_PRECOMPUTED\": if not (dropna)", "columns = [self.alias] check = True if len(args) > 0:", "return_enum_trans: bool = False, ): \"\"\" --------------------------------------------------------------------------- Discretizes the vColumn", "the input vColumns based on an aggregation. \"\"\" check_types( [", "field to extract. It must be one of the following:", "operations, and offers beautiful graphical options. # # VerticaPy aims", "input aggregations. \"\"\" return self.aggregate(func=[\"prod\"]).values[self.alias][0] prod = product # ---#", "pie chart. donut : Donut chart. rose : Rose chart.", "\"\"\" --------------------------------------------------------------------------- Clips the vColumn by transforming the values lesser", "{}, format = 'tabular'))\".format( tmp_model_name, i ) for i in", "= [\"DECODE({}\"] text_info = \"\\n\" for k in range(len(distinct_elements)): expr", "= \"DECODE({}, '{}', 1, 0)\".format( \"{}\", str(distinct_elements[k]).replace(\"'\", \"''\") ) transformations", "\\ \\____ \\ \\ \\_\\ \\ \\ \\ \\-./\\ \\", "value of the record is greater than the threshold, it", "aggregations. \"\"\" return self.aggregate([\"aad\"]).values[self.alias][0] # ---# def abs(self): \"\"\" ---------------------------------------------------------------------------", "mad : median absolute deviation max : maximum mean :", "---------- threshold: float, optional Uses the Gaussian distribution to identify", "\"round\"): expr = \"{}({})\".format(func.upper(), \"{}\") else: expr = \"{}({}, {})\".format(func.upper(),", "\"float\"): trans = ( \"'[' || FLOOR({} / {}) *", "instead of the 'alpha' parameter. alpha: float, optional Number representing", "Absolute Deviation). Returns ------- float aad See Also -------- vDataFrame.aggregate", "generated SQL code. Note: Dropping a vColumn can make the", "self.isnum() else \"topk\", nbins=nbins, k=nbins, new_category=\"Others\", return_enum_trans=True, )[0].replace(\"{}\", self.alias) query", "(1 meaning global outlier). \"\"\" check_types( [ (\"alpha\", alpha, [int,", "parameters = model.get_params() try: model.fit(tmp_view_name, [self.alias], response) query = [", "to use to determine if the vColumn will be considered", "vColumn with Label Encoding. vDataFrame[].get_dummies : Encodes the vColumn with", "int, optional Number of elements to display. Returns ------- tablesample", "\"float\", \"float\", ) ] if method != \"robust_zscore\": max_floor =", "vColumns 1 and 2 bars. It is only valid if", "] ) method = method.lower() assert (method != \"cat_stats\") or", "\".\" ) return self.parent one_hot_encode = get_dummies # ---# def", "+ sys.getsizeof(self.alias) + sys.getsizeof(self.transformations) + sys.getsizeof(self.catalog) ) for elem in", "except in compliance with the License. # You may obtain", "- sauv[\"approx_50%\"]) / ( 1.4826 * sauv[\"mad\"] ) elif method", "'unique' (cardinality). 
Parameters ---------- approx: bool, optional If set to", "NULL GROUP BY 1) x ORDER BY verticapy_agg DESC\".format( self.alias,", "{ \"index\": [item[0] for item in result], \"count\": [int(item[1]) for", "to the input vColumn. \"\"\" check_types([(\"n\", n, [int, float])]) return", "FROM {} LIMIT 1\".format( cmax, cmin, self.parent.__genSQL__() ), print_time_sql=False, )", "\"percent\": [float(round(item[2], 3)) for item in result], } return tablesample(values)", "Encodes the vColumn using the mean encoding of a response.", "{} was transformed using a mean encoding with {} as", "= False ): \"\"\" --------------------------------------------------------------------------- Normalizes the input vColumns using", "/ WEEK / YEAR Returns ------- vDataFrame self.parent See Also", "= \"{}({})\".format(func.upper(), \"{}\") else: expr = \"{}({}, {})\".format(func.upper(), \"{}\", x)", "a numerical value\" ) lower_when = ( \"WHEN {} <", "[trans] sauv = {} for elem in self.catalog: sauv[elem] =", "max_cardinality, [int, float]), (\"h\", h, [int, float]), (\"nbins\", nbins, [int,", "= 10): \"\"\" --------------------------------------------------------------------------- Computes the Information Value (IV) /", "a function to the vColumn. \"\"\" check_types( [ ( \"func\",", "Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].fill_outliers : Fills", "\"backfill\"]: all_partition += [elem for elem in order_by] for elem", "of the record is greater than the threshold, it will", "vColumn {} was converted to {}.\".format( self.alias, dtype ) )", ") if distinct_count > max_cardinality: query += ( \"UNION ALL", "return self.describe(method=\"categorical\", max_cardinality=k) # ---# def var(self): \"\"\" --------------------------------------------------------------------------- Aggregates", "/ self.parent.shape()[0] ) except: pass total = int(total) conj =", "based on an aggregation. \"\"\" check_types( [ (\"method\", method, [str]),", "int): \"\"\" --------------------------------------------------------------------------- Rounds the vColumn by keeping only the", "\"second\", start: bool = True): \"\"\" --------------------------------------------------------------------------- Slices and transforms", "UNION ALL \".join(query), nbins - 1 ) result = executeSQL(", "= executeSQL( query, title=\"Computing the different aggregations.\", method=\"fetchall\", ) for", "if approx: return self.aggregate(func=[\"approx_unique\"]).values[self.alias][0] else: return self.aggregate(func=[\"unique\"]).values[self.alias][0] # ---# def", ") if use_threshold: result = self.aggregate(func=[\"std\", \"avg\"]).transpose().values p_alpha, p_1_alpha =", "step)) # ---# def sub(self, x: float): \"\"\" --------------------------------------------------------------------------- Subtracts", "CONDITIONS OF ANY KIND, either express or implied. # See", "+= [\"\"] result.values[\"iv\"] += [sum(result[\"iv\"])] return result # ---# def", "must be numerical for Normalization\") return self.parent # ---# def", "n: int = 10): \"\"\" --------------------------------------------------------------------------- Returns the n largest", "One-Hot Encoding. vDataFrame[].mean_encode : Encodes the vColumn using the mean", "Computes the vDataFrame input aggregations. \"\"\" check_types([(\"x\", x, [int, float],", "optional Method to use to normalize. 
zscore : Normalization using", "tmp_query += ( \" WHERE {} IS NULL\".format(self.alias) if (category", "\"text\") if return_enum_trans: return trans else: self.transformations += [trans] sauv", "cube root ceil : value up to the next whole", "date-like vColumns.\" ) assert len(breaks) >= 2, ParameterError( \"Length of", "= 1 self.parent.__add_to_history__( \"[Normalize]: The vColumn '{}' was normalized with", "{} END)\".format(lower_when, upper_when, \"{}\") self.apply(func=func) return self.parent # ---# def", "Parameters ---------- lower: float, optional Lower bound. upper: float, optional", "See Also -------- vDataFrame[].dropna : Drops the vColumn missing values.", "FROM {}) VERTICAPY_OPTIMAL_H_TABLE\".format( min_date, self.alias, self.alias, self.parent.__genSQL__() ) query =", "6, h: float = 0, pie_type: str = \"auto\", ax=None,", "Iterable from typing import Union # VerticaPy Modules import verticapy", "with Label Encoding. vDataFrame[].mean_encode : Encodes the vColumn using the", "[int, float]), (\"dropna\", dropna, [bool])]) topk = \"\" if (k", "discretization\" ) total, query, nth_elems = nb, [], [] while", "warning_message = \"The vColumn {} has no mode (only missing", "Method to use to normalize. zscore : Normalization using the", "parameter 'x' will represent the number of seconds, otherwise it", "\"biserial\": {}, \"regr_avgx\": {}, \"regr_avgy\": {}, \"regr_count\": {}, \"regr_intercept\": {},", "be computed. nbins: int, optional Number of bins used for", "({3}{4} USING PARAMETERS percentile = 0.75) AS 'approx_75%', APPROXIMATE_PERCENTILE ({3}{4}", "TypeError(\"The vColumn must be numerical for Normalization\") return self.parent #", "return pre_comp assert n >= 1, ParameterError(\"Parameter 'n' must be", "[str]), (\"start_date\", start_date, [str, datetime.datetime, datetime.date]), (\"end_date\", end_date, [str, datetime.datetime,", "(schema): schema = \"public\" name = gen_tmp_name(schema=schema, name=\"kde\") if isinstance(xlim,", "optimized histogram nbins using Random Forest.\", method=\"fetchall\", ) result =", "Mean for the numerical and Mode for the categorical vColumns.", "as st return self.apply(func=st.decode(str_sql(\"{}\"), *argv)) # ---# def density( self,", "transformed. Parameters ---------- pat: str regular expression. Returns ------- vDataFrame", "not selecting it in the final generated SQL code. Note:", "is forbidden !\") return self.apply(func=\"{} / ({})\".format(\"{}\", x)) # ---#", "\"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'unique' (cardinality). Parameters ----------", "boxplot(self, by, h, max_cardinality, cat_priority, ax=ax, **style_kwds) # ---# def", "Diaconis and Sturges. freedman_diaconis : Freedman Diaconis [2 * IQR", "the vColumn missing values. \"\"\" if isinstance(by, str): by =", "Verifies if the regular expression is in each of the", "\"({} - {}) / {}({})\".format( self.alias, avg, \"NULLIFZERO\" if (nullifzero)", "expression in each record of the vColumn. vDataFrame[].extract : Extracts", "\"\"\" --------------------------------------------------------------------------- Applies a function to the vColumn. Parameters ----------", "it is part of the DB version you are using.", "'Others'. cat_priority: list, optional List of the different categories to", "the vColumn category is date. vDataFrame[].isnum : Returns True if", "the comma. 
Parameters ---------- n: int Number of digits to", "vColumn {} named {} was added to the vDataFrame.\".format( self.alias,", "information, see utilities.tablesample. See Also -------- vDataFrame.aggregate : Computes the", "AS percent, AVG({3}{4}) AS mean, STDDEV({3}{4}) AS std, MIN({3}{4}) AS", "True if the vColumn is numerical. See Also -------- vDataFrame[].isbool", "int, optional Number of elements to display. offset: int, optional", "(\"AVG({}) OVER (PARTITION BY {})\".format(response, \"{}\"), \"int\", \"float\") ] self.parent.__update_catalog__(erase=True,", "self.parent.__get_catalog_value__(self.alias, \"store_usage\") if pre_comp != \"VERTICAPY_NOT_PRECOMPUTED\": return pre_comp store_usage =", "format = 'tabular'))\".format( tmp_model_name, i ) for i in range(parameters[\"n_estimators\"])", "> 0.01: h = round(h, 2) elif h > 0.0001:", "vColumn. The vColumn will be transformed. Parameters ---------- start: int", "= method.lower() self.parent.are_namecols_in([elem for elem in order_by] + by) by", "\\ / # \\ / # \\/ # _ #", "n ** (1 / 3)] sturges : Sturges [CEIL(log2(n)) +", "of the slicing. Returns ------- vDataFrame self.parent See Also --------", "( self.alias.replace('\"', \"\") + prefix_sep.replace('\"', \"_\") if not (prefix) else", "executeSQL( query=query, title=\"Computing the equal frequency histogram bins.\", method=\"fetchall\", )", "Parameters ---------- threshold: float, optional Uses the Gaussian distribution to", "in (\"ffill\", \"pad\")) else \" DESC\" partition_by = ( \"PARTITION", "float A float between 0 and 1 that represents the", "cat_priority, ax=ax, **style_kwds) # ---# def category(self): \"\"\" --------------------------------------------------------------------------- Returns", "except Exception as e: raise QueryError( \"{}\\nError when applying the", "str(int(n)) if isinstance(top, decimal.Decimal): top = float(top) self.parent.__update_catalog__( {\"index\": [\"top{}\".format(n)],", "together to create a new category : 'Others'. cat_priority: list,", "floor_end ), \"varchar\", \"text\", ) else: trans = (\"FLOOR({}) ||", "to the vDataFrame.\".format( self.alias, name ) ) return self.parent #", "for k in range(max_floor): self.parent[copy_name].transformations += [ (\"{}\", self.ctype(), self.category())", "Interpolation). ffill : Propagation of the first element (Constant Interpolation).", "empty, an optimized number of bins will be computed. h:", "---# def __setattr__(self, attr, val): self.__dict__[attr] = val # #", "COUNT(*) AS count\" \" FROM vdf_table WHERE {0} IS NOT", "executeSQL( \"SELECT * FROM {} LIMIT 10\".format( self.parent.__genSQL__(force_columns=force_columns) ), print_time_sql=False,", "[(\"method\", method, [\"sturges\", \"freedman_diaconis\", \"fd\", \"auto\"])] ) method = method.lower()", "result[i][2] == None: pass elif math.isnan(result[i][2]): result[i][2] = None avg", "\"''\")) if elem[0] != None else \"NULL\", elem[2] if elem[2]", "vDataFrame[].discretize : Discretizes the vColumn. vDataFrame[].label_encode : Encodes the vColumn", "\"\"\" check_types([(\"name\", name, [str])]) name = quote_ident(name.replace('\"', \"_\")) assert name.replace('\"',", "logarithm log : logarithm log10 : base 10 logarithm mod", "MINUTE / MONTH / QUARTER / SECOND / TIME ZONE", "order the data\" ) desc = \"\" if (method in", "the vColumn. Parameters ---------- threshold: float, optional Uses the Gaussian", "normalize {} using a Z-Score - The Standard Deviation is", "if the vColumn is numerical. 
\"\"\" return self.category() == \"date\"", "the vDataFrame by using an advanced analytical function on a", "/ / # \\ / # \\ / # \\/", ": base 10 logarithm mod : remainder of a division", "op1, close_l = \"<\", \"]\" if labels: label = labels[idx]", "to impute the missing values. auto : Mean for the", "else \"\" ) func = \"(CASE {}{}ELSE {} END)\".format(lower_when, upper_when,", "NULLS) OVER ({} ORDER BY {}))\".format( \"{}\", \"{}\", partition_by, order_by_ts", "p_1_alpha = ( -threshold * result[\"std\"][0] + result[\"avg\"][0], threshold *", "skewness See Also -------- vDataFrame.aggregate : Computes the vDataFrame input", "response: str Response vColumn. Returns ------- vDataFrame self.parent See Also", "--------------------------------------------------------------------------- Returns the tail of the vColumn. Parameters ---------- limit:", "self.parent.__add_to_history__( \"[Fillna]: {} {} missing value{} filled.\".format( total, self.alias, conj,", "vColumn outliers using the input method. \"\"\" check_types([(\"lower\", lower, [float,", "type numeric|date\" ) if self.isnum(): result = ( self.parent.describe( method=\"numerical\",", "- {}) / {} < {}\".format( self.alias, result[\"avg\"][0], result[\"std\"][0], threshold", "copy. Returns ------- vDataFrame self.parent See Also -------- vDataFrame.eval :", "Parameters ---------- method: str, optional Method to use to fill", ") except: cmax, cmin = ( \"MAX({}) OVER (PARTITION BY", "> 0: try: if \"count\" in sauv: self.catalog[\"count\"] = int(sauv[\"count\"])", "< 0: index_start += self.parent.shape()[0] if isinstance(index_stop, int): if index_stop", "See Also -------- vDataFrame[].bar : Draws the Bar Chart of", "auto : Uses method 'same_width' for numerical vColumns, cast the", "the vColumn based on an aggregation. \"\"\" check_types( [ (\"method\",", "0: column = args[0] elif \"column\" in kwargs: column =", "# |_)\\/ |_)(_|(_|| \\_/|_|(_||| # / # ____________ ______ #", "lower and upper outliers.\".format( self.alias ), method=\"fetchall\", ) ] if", "max_cardinality ) if distinct_count > max_cardinality: query += ( \"UNION", "isinstance(values[elem][i], decimal.Decimal): values[elem][i] = float(values[elem][i]) return tablesample(values) # ---# def", "self.alias), self.alias, kwargs[\"agg\"], self.parent.__genSQL__(), self.alias, ) query_result = executeSQL( query=query,", "h, donut, rose, ax=None, **style_kwds, ) # ---# def plot(", "vColumn using a time series rule. \"\"\" return self.apply(func=\"DATE_PART('{}', {})\".format(field,", "See Also -------- vDataFrame.bar : Draws the Bar Chart of", "to use this method when first preparing your data. Parameters", "\"SELECT {} AS {} FROM {}{} LIMIT {} OFFSET {}\".format(", "method, [\"auto\", \"smart\", \"same_width\", \"same_freq\", \"topk\"], ), (\"return_enum_trans\", return_enum_trans, [bool]),", "= fd else: best_h = max(sturges, fd) self.parent.__update_catalog__({\"index\": [\"numh\"], self.alias:", "vDataFrame.analytic : Adds a new vColumn to the vDataFrame by", "if not (schema): schema = \"public\" name = gen_tmp_name(schema=schema, name=\"kde\")", "IS NOT NULL\".format(self.alias) if (dropna) else \"\" query = \"SELECT", "result.values[\"iv\"] += [sum(result[\"iv\"])] return result # ---# def kurtosis(self): \"\"\"", "self.aggregate([\"avg\", \"std\"]).values[self.alias] if stddev == 0: warning_message = \"Can not", "element. 
See Also -------- vDataFrame.aggregate : Computes the vDataFrame input", "as categorical (No h will be picked or computed) nbins:", "can not be converted to {}\".format( e, self.alias, dtype )", "sauv[\"mad\"] ) elif method == \"zscore\": self.catalog[elem] = (sauv[elem] -", "the vColumn. Returns ------- str/float/int vColumn nth most occurent element.", "from the vDataFrame. \"\"\" check_types([(\"add_history\", add_history, [bool])]) try: parent =", "';' || (FLOOR({} / {}) * {} + {}{}) ||", "{} AS {} FROM {} WHERE {} IS NOT NULL", "to determine if the vColumn will be considered as categorical.", "---------- y: str Response vColumn. nbins: int, optional Maximum number", "nullifzero = 0 cmin, cmax = self.aggregate([\"min\", \"max\"]).values[self.alias] if cmax", "Returns ------- ax Matplotlib axes object See Also -------- vDataFrame[].bar", "expression in each record of the vColumn. The vColumn will", "vDataFrame represents the entire relation, a vColumn can be seen", "optimised interval will be computed. nbins: int, optional Number of", "by: self.parent.are_namecols_in(by) by = self.parent.format_colnames(by) from verticapy.plot import boxplot return", "datetime.datetime, datetime.date]), (\"area\", area, [bool]), (\"step\", step, [bool]), ] )", "------- vDataFrame self.parent See Also -------- vDataFrame.add_copy : Creates a", "method in (\"same_width\", \"auto\"): if not (h) or h <=", "\"]\", \"]\" else: op1, op2, close_l, close_r = \"<=\", \"<\",", "a copy with the specified name. \\u26A0 Warning : SQL", "--------------------------------------------------------------------------- Returns the n largest vColumn elements. Parameters ---------- n:", "1) else str(int(n)) if isinstance(top, decimal.Decimal): top = float(top) self.parent.__update_catalog__(", "input method. Parameters ---------- method: str, optional Method to use", "be transformed. Parameters ---------- start: int Start of the slicing.", "# ---# def var(self): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using", "float = 0, ax=None, **style_kwds, ): \"\"\" --------------------------------------------------------------------------- Draws the", "in result if elem[1] != None ] ), ) stddev", "AS 'approx_25%', APPROXIMATE_PERCENTILE ({3}{4} USING PARAMETERS percentile = 0.5) AS", "VERTICAPY_SUBTABLE ORDER BY split_value::float\".format( \" UNION ALL \".join(query), nbins -", "copy will be created using the input Name. Returns -------", "all_cols, max_floor = self.parent.get_columns(), 0 for column in all_cols: try:", "Sturges. freedman_diaconis : Freedman Diaconis [2 * IQR / n", "geopandas.GeoDataFrame.plot.html Returns ------- ax Matplotlib axes object \"\"\" columns =", "p_1_alpha = executeSQL( query=query, title=\"Computing the quantiles of {}.\".format(self.alias), method=\"fetchrow\",", "be able to solve this issue.\" ) self.add_copy(new_name) parent =", "None), ParameterError( \"At least 'lower' or 'upper' must have a", "------- vDataFrame self.parent See Also -------- vDataFrame.fill_outliers : Fills the", "value.replace(\"'\", \"''\") ) ) # ---# def str_slice(self, start: int,", "to normalize by grouping by elements, please use a method", "title=title) # ---# def numh(self, method: str = \"auto\"): \"\"\"", ": trigonometric inverse cosine asin : trigonometric inverse sine atan", "vDataFrame[].label_encode : Encodes the vColumn with Label Encoding. 
The fragments in this block come from verticapy/vcolumn.py (Copyright [2018-2022] Micro Focus or one of its affiliates, licensed under the Apache License, Version 2.0). The file defines vColumn(str_sql), the Python object that stores a virtual column of a parent vDataFrame: it keeps the column alias, the list of SQL transformations applied so far, and a catalog of pre-computed aggregations, and it is refreshed whenever the parent vDataFrame is modified. One vDataFrame can have multiple child vColumns, whereas a vColumn has a single parent.

Aggregation methods (aad, avg, count, kurtosis, mad, max, median, min, mode, nunique, product, quantile, sem, skewness, std, sum, topk, value_counts, nlargest, nsmallest) delegate to vDataFrame.aggregate on the single column and return a scalar or a tablesample; already computed aggregations are memorized in the catalog so repeated calls do not recompute them. quantile(x, approx=True) and median(approx=True) use approximate percentiles by default, and topk(k, dropna=True) returns the k most frequent categories together with their counts and percents.
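A minimal usage sketch of these aggregation methods, assuming a configured database connection and a table public.titanic with columns such as "age" and "embarked" (the table and column names are illustrative assumptions, not taken from this file):

    import verticapy as vp

    # A database connection is assumed to be set up already.
    vdf = vp.vDataFrame("public.titanic")        # hypothetical relation

    vdf["age"].median()                          # approximate median by default
    vdf["age"].quantile(0.25)                    # Q1, approx=True by default
    vdf["age"].aggregate(["min", "50%", "max"])  # returns a tablesample
    vdf["embarked"].topk(3)                      # 3 most frequent categories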
Missing values and outliers are handled in place on the column, and every mutator returns the parent vDataFrame so that calls can be chained. dropna() filters out the rows where the vColumn is NULL. fillna(val=None, method="auto", expr="", by=[], order_by=[]) imputes with a constant, an SQL expression, or a method: "auto" uses the mean for high-cardinality numerical columns and the mode otherwise; "mean", "median" and "mode" impute the corresponding statistic, optionally partitioned by the by columns; "0ifnull" maps NULL to 0 and everything else to 1; "ffill"/"pad" and "bfill"/"backfill" propagate the previous or next value and therefore require order_by. fill_outliers(method="winsorize", threshold=4.0, use_threshold=True, alpha=0.05) either winsorizes (values below the lower bound are set to the lower bound and values above the upper bound to the upper bound), replaces outliers with NULL, or replaces the lower and upper outliers by their respective averages; the bounds come from a Z-score threshold or, when use_threshold is False, from the alpha and 1 - alpha quantiles. drop_outliers(threshold=4.0, use_threshold=True, alpha=0.05) filters the same rows out instead. normalize(method="zscore", by=[], return_trans=False) rescales with (x - avg) / std, a robust Z-score (x - median) / (1.4826 * MAD), or min-max scaling (x - min) / (max - min), optionally partitioned with an OVER (PARTITION BY ...) clause; return_trans returns the SQL transformation instead of applying it.
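A sketch of the imputation, outlier, and normalization calls described above, on the same assumed vdf (column names remain illustrative):

    # Impute "age" with the average age of each passenger class.
    vdf["age"].fillna(method="mean", by=["pclass"])

    # Winsorize "fare" at the 5% / 95% quantiles rather than a Z-score threshold.
    vdf["fare"].fill_outliers(method="winsorize", use_threshold=False, alpha=0.05)

    # Drop rows whose |Z-score| on "fare" exceeds 4.
    vdf["fare"].drop_outliers(threshold=4.0)

    # Robust Z-score normalization: (x - median) / (1.4826 * MAD).
    vdf["fare"].normalize(method="robust_zscore")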
Encoding follows the same in-place pattern. label_encode() maps the categories of a categorical column onto the integers [0, n - 1] through a bijection, n being the cardinality. get_dummies(prefix="", prefix_sep="_", drop_first=True, use_numbers_as_suffix=False), aliased one_hot_encode, adds one dummy column per category and drops the first dummy by default to avoid creating correlated features. mean_encode(response) replaces each category by the average of a numerical response column computed per category, and iv_woe(y, nbins) builds the Information Value / Weight of Evidence table against a binary response to measure the predictive power of the column.
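A hedged sketch of these encoders on a fresh copy of the assumed vdf ("survived" is assumed to be a 0/1 column):

    # Bijection of the categories of "sex" onto [0, n - 1].
    vdf["sex"].label_encode()

    # One-hot encoding of "embarked"; the original column is kept.
    vdf["embarked"].get_dummies(prefix_sep="_", drop_first=True)

    # Information Value / Weight of Evidence of "embarked" w.r.t. the response,
    # then replace each category by the mean of that response.
    vdf["embarked"].iv_woe("survived")
    vdf["embarked"].mean_encode("survived")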
Discretization and lower-level transforms complete the encoding toolbox. discretize(method="auto", h=0, nbins=-1, k=6, new_category="Others", RFmodel_params={}, response="") bins the column with equal-width bins ("same_width", the default for numerical columns), equal-frequency bins ("same_freq", nbins must be at least 2), the k most frequent categories with the rest merged into new_category ("topk"), or a "smart" mode that trains a Random Forest (a regressor or a classifier depending on the response type, e.g. RFmodel_params={"n_estimators": 20, "max_depth": 10}) on a response column to find the most relevant intervals. cut(breaks, labels=[], include_lowest=True, right=True) discretizes against an explicit list of break points, with len(breaks) equal to len(labels) + 1 when labels are given. apply(func) applies an SQL expression in which {} stands for the column, apply_fun covers the usual mathematical functions (abs, exp, ln, sqrt, pow, ...), and astype(dtype), date_part(field) for timestamp fields (HOUR, MINUTE, MONTH, QUARTER, ...), the arithmetic helpers abs/add/sub/mul/div/round, add_copy(name), rename(new_name) and drop() (which removes the column by simply not selecting it in the generated SQL) round out the transformation methods.
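A sketch of the discretization and transform calls, still on a fresh copy of the assumed vdf:

    # Equal-frequency binning of "age" into 5 buckets.
    vdf["age"].discretize(method="same_freq", nbins=5)

    # Explicit breaks for "fare", labelled manually (len(breaks) == len(labels) + 1).
    vdf["fare"].cut(breaks=[0.0, 10.0, 50.0, 1000.0], labels=["low", "mid", "high"])

    # SQL-expression transform; {} stands for the column itself.
    vdf["parch"].apply(func="LOG({} + 1)")

    # Keep an untouched copy before renaming the original.
    vdf["sibsp"].add_copy("sibsp_raw")
    vdf["sibsp"].rename("siblings_spouses")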
The plotting methods mirror their vDataFrame counterparts but operate on the single column and return a Matplotlib axes object. bar, hist, pie (pie_type "auto", "donut" or "rose") and spider accept an aggregation method ("density", "count", "mean", "min", "max", "sum", a "q%" quantile, or a custom aggregation such as AVG(column1) + 5), an optional of column to aggregate, and max_cardinality / h / nbins parameters that control the binning; when h is left at 0, numh() picks an optimal bar width from the Freedman-Diaconis and Sturges rules. boxplot(by="", max_cardinality=8, cat_priority=[]) draws a box plot, optionally partitioned by a categorical column and restricted to the listed categories. density(by="", bandwidth=1.0, kernel="gaussian", plus nbins and xlim options) draws a kernel density estimate (gaussian, logistic or sigmoid kernel) backed by the verticapy.learn.neighbors.KernelDensity model, with one curve per category when by is given. range_plot(ts, q=(0.25, 0.75), start_date="", end_date="", plot_median=False) draws the quantile band of the column along a date-like or numerical ts column, and geo_plot(*args, **kwargs) draws a geospatial column through GeoPandas.
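A short plotting sketch on a fresh copy of the assumed vdf; each call returns a standard Matplotlib axes object that can be styled further:

    # Density histogram of "age", letting numh() choose the bar width.
    vdf["age"].hist(method="density")

    # Average fare per embarkation port, as a bar chart.
    vdf["embarked"].bar(method="mean", of="fare")

    # Kernel density estimate of "age", one curve per passenger class.
    vdf["age"].density(kernel="gaussian", by="pclass")

    # Box plot of "fare" partitioned by "pclass".
    vdf["fare"].boxplot(by="pclass")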
The remaining methods cover string handling, type checks and inspection. str_contains(pat), str_count(pat), str_extract(pat), str_replace(to_replace, value="") and str_slice(start, step) wrap the corresponding regular-expression and substring functions and transform the column in place (str_contains, for instance, becomes REGEXP_COUNT({}, pat) > 0). isin(val) checks whether specific records are present, isnum(), isdate() and isbool() test the column category, category() returns the VerticaPy category and ctype() the underlying database type, and memory_usage() and store_usage() report the in-memory footprint and the expected storage footprint in bytes. head(), tail() and iloc(limit, offset) read slices of the column, distinct() and nunique(approx=True) expose the distinct values and the (approximate) cardinality, and describe(method="auto", max_cardinality=6, numcol="") gathers the usual statistics into a tablesample, with dedicated "numerical", "categorical" and "cat_stats" modes (the latter computed against a numerical column numcol).
Parameters ---------- by: str, optional vColumn", "text / binary / spatial / uuid / undefined Returns", "Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].dropna : Drops", "str, optional New value. Returns ------- vDataFrame self.parent See Also", "+ 2\". copy_name: str, optional If not empty, a copy", "( \"UNION ALL (SELECT 'Others', SUM(count) FROM (SELECT COUNT(*) AS", "/ int(nbins))) * int(nbins): nth_elems += [str(total)] total += nb", "when the vColumn is null, 1 otherwise. expr: str, optional", "The vColumn {} was transformed using a mean encoding with", "max_floor = 0 all_partition = by if method in [\"ffill\",", "-------- vDataFrame[].nsmallest : Returns the n smallest elements in the", "(\"{} || ''\", \"varchar\", \"text\") if (self.isnum() and method ==", "enumerate(result): result[idx][0] = ( \"NULL\" if (elem[0] == None) else", "by dropping the current vColumn and creating a copy with", "the vColumn. \"\"\" check_types([(\"limit\", limit, [int, float]), (\"offset\", offset, [int,", "] ) assert self.isnum() or self.isdate(), TypeError( \"cut only works", "{}({} - {})\".format( \"{}\", cmin, \"NULLIFZERO\" if (nullifzero) else \"\",", "---# def aad(self): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'aad'", "/ / # |______ / / / # |____/ /", "(h > 1) or (self.category() == \"float\"): trans = (", "std : standard deviation topk : kth most occurent element", "query = \"WITH vdf_table AS (SELECT * FROM {}) {}\".format(", "be greater or equal to 2.\" ) assert len(breaks) ==", "Returns True if the vColumn category is date, False otherwise.", "the parent vDataFrame. Parameters ---------- name: str Name of the", "[name] conj = \"s were \" if len(all_new_features) > 1", "optional parameter to pass to the Matplotlib functions. Returns -------", "nbins if h > 0.01: h = round(h, 2) elif", "------- int vColumn expected store usage. See Also -------- vDataFrame.expected_store_usage", ": Replaces the outliers by the NULL value. winsorize :", "=> {}\".format(distinct_elements[k], k) expr = \", \".join(expr) + \", {})\".format(len(distinct_elements))", "name=\"kde\") if isinstance(xlim, (tuple, list)): xlim_tmp = [xlim] else: xlim_tmp", "are in the vColumn and it returns the new vDataFrame", "NULL) VERTICAPY_SUBTABLE {}\".format( self.alias, self.alias, self.alias, self.parent.__genSQL__(), self.alias, where, )", "* sauv[\"count\"] / self.parent.shape()[0] ) for elem in sauv: if", "IS NOT NULL LIMIT 0\".format( func.replace(\"{}\", self.alias), self.parent.__genSQL__(), self.alias, ),", "method == \"is_numeric\" ): self.parent.__update_catalog__({\"index\": index, self.alias: result}) for elem", "'{second_elem}' THEN '{label}'\" ] expr = \"CASE WHEN \" +", "self elif method == \"minmax\": if n == 0: nullifzero", "NULL END)\" trans = (trans, \"varchar\", \"text\") if return_enum_trans: return", "): self.parent.__update_catalog__({\"index\": index, self.alias: result}) for elem in values: for", "= (None, None), ax=None, **style_kwds, ): \"\"\" --------------------------------------------------------------------------- Draws the", "to use to impute the missing values. auto : Mean", "input aggregations. 
\"\"\" return self.aggregate([\"count\"]).values[self.alias][0] # ---# def cut( self,", "if (self.category() == \"int\") else \"\" if (h > 1)", "# ---# def get_dummies( self, prefix: str = \"\", prefix_sep:", "int)): index_start = 0 if index_start < 0: index_start +=", "\"float\", ) ] else: warning_message = \"Can not normalize {}", "h = (self.max() - self.min()) * 1.01 / nbins if", "in executeSQL( query=query, title=\"Computing the average of the {}'s lower", "total = abs(self.count() - total) except Exception as e: self.transformations", "expression. Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].str_contains :", "trans = ( \"(CASE WHEN {} IN ({}) THEN {}", "in each of the vColumn record by an input value.", "# ---# def rename(self, new_name: str): \"\"\" --------------------------------------------------------------------------- Renames the", "{})\".format(\"{}\", expr) elif method == \"0ifnull\": new_column = \"DECODE({}, NULL,", "__nonzero__(self): return self.count() > 0 # ---# def __repr__(self): return", "self.parent.are_namecols_in(by) by = self.parent.format_colnames(by) from verticapy.plot import boxplot return boxplot(self,", "date. \"\"\" return self.category() in (\"float\", \"int\") # ---# def", "Encodes the vColumn with One-Hot Encoding. vDataFrame[].label_encode : Encodes the", "\"avg\"]).transpose().values p_alpha, p_1_alpha = ( -threshold * result[\"std\"][0] + result[\"avg\"][0],", "be filtered. ax: Matplotlib axes object, optional The axes to", ") total, query, nth_elems = nb, [], [] while total", "Parameters ---------- val: list List of the different records. For", "\"x\"), self.alias.replace('\"', \"\") ) ) # ---# def apply_fun(self, func:", "skew = skewness # ---# def slice(self, length: int, unit:", "{})\".format( self.alias, \", \".join(by) ), ) else: cmax, cmin =", "name, [str])]) name = quote_ident(name.replace('\"', \"_\")) assert name.replace('\"', \"\"), EmptyParameter(", "-------- vDataFrame[].tail : Returns the a part of the vColumn.", "axes object See Also -------- vDataFrame.donut : Draws the donut", "for elem in order_by] for elem in all_partition: if len(self.parent[elem].transformations)", "and bools), a RF Classifier otherwise. Example: Write {\"n_estimators\": 20,", "AVG(column1) + 5). of: str, optional The vColumn to use", "): max_floor = max( len(self.parent[column].transformations), max_floor ) except: pass max_floor", "pre_comp != \"VERTICAPY_NOT_PRECOMPUTED\": return pre_comp assert self.isnum() or self.isdate(), ParameterError(", "analytics and machine learning features. It supports the # entire", "events, pt_non_events, pt_events, CASE WHEN non_events = 0 OR events", "\", \".join(by) ), \"STDDEV({}) OVER (PARTITION BY {})\".format( self.alias, \",", "= \"s were \" if total > 1 else \"", "--------------------------------------------------------------------------- Returns the k most occurent elements, how often they", "Converts the vColumns to the input type. 
\"\"\" check_types([(\"dtype\", dtype,", "custom_lines += [ Line2D( [0], [0], color=updated_dict(param, style_kwds, idx)[\"color\"], lw=4,", "return_enum_trans: return trans else: self.transformations += [trans] sauv = {}", "True if pie_type == \"donut\" else False rose = True", "method == \"winsorize\": self.clip(lower=p_alpha, upper=p_1_alpha) elif method == \"null\": self.apply(", "# ---# def str_contains(self, pat: str): \"\"\" --------------------------------------------------------------------------- Verifies if", "str, optional vColumn to use to partition the data. h:", "---------- limit: int, optional Number of elements to display. offset:", "memorize the already computed aggregations to gain in performance. The", "-------- vDataFrame.analytic : Adds a new vColumn to the vDataFrame", "value at risk dtype : vColumn type iqr : interquartile", "plot of the vColumn. The aggregations used are the median", "[self.alias] if add_history: self.parent.__add_to_history__( \"[Drop]: vColumn {} was deleted from", "see utilities.tablesample. See Also -------- vDataFrame[].head : Returns the head", ") self.catalog = { \"cov\": {}, \"pearson\": {}, \"spearman\": {},", "int = 5, offset: int = 0): \"\"\" --------------------------------------------------------------------------- Returns", "numerical\" ) cast = \"::int\" if (self.parent[numcol].isbool()) else \"\" query,", "Returns ------- float sem See Also -------- vDataFrame.aggregate : Computes", "Also -------- vDataFrame[].str_contains : Verifies if the regular expression is", "sinh : hyperbolic sine sqrt : arithmetic square root tan", "{}) / {} < {}\".format( self.alias, result[\"avg\"][0], result[\"std\"][0], threshold )", "): \"\"\" --------------------------------------------------------------------------- Encodes the vColumn with the One-Hot Encoding", "1) and (method != \"numerical\")) or not (is_numeric) or (method", "self.parent.__add_to_history__( \"[Normalize]: The vColumn '{}' was normalized with the method", "else prefix.replace('\"', \"_\") + prefix_sep.replace('\"', \"_\") ) n = 1", "considered during the computation. Returns ------- tablesample An object containing", "the vColumn from the vDataFrame. Dropping a vColumn means simply", ": Replaces the upper and lower outliers by their respective", "to_replace: str Regular expression to replace. value: str, optional New", "# / / # / / # \\ / #", "index_start = index.start if not (isinstance(index_start, int)): index_start = 0", "vDataFrame.astype : Converts the vColumns to the input type. \"\"\"", "elem in self.catalog: total += sys.getsizeof(elem) return total # ---#", "optional The method to use to aggregate the data. count", "bool = False, n: int = 1): \"\"\" --------------------------------------------------------------------------- Returns", "elem in result] ), ) executeSQL( \"SELECT {} FROM {}", "\"Reads {}.\".format(self.alias) tail = to_tablesample( \"SELECT {} AS {} FROM", "been transformed multiple times, so it's better practice to use", "{}) / {}({} - {})\".format( self.alias, cmin, \"NULLIFZERO\" if (nullifzero)", "not be considered during the computation. n: int, optional Integer", "MAX. MAX = MIN !\".format( self.alias ) warnings.warn(warning_message, Warning) return", "self.alias, self.parent.__genSQL__(), by[0], ), title=\"Computing the different categories to normalize.\",", "def hist( self, method: str = \"density\", of: str =", "date, optional Input Start Date. 
For example, time = '03-11-1993'", "drop(tmp_model_name, method=\"model\") if self.parent[response].category() == \"float\": model = RandomForestRegressor(tmp_model_name) else:", "assert order_by, ParameterError( \"If the method is in ffill|pad|bfill|backfill then", "(PARTITION BY {})\".format( self.alias, \", \".join(by) ), \"STDDEV({}) OVER (PARTITION", "\"\", return_enum_trans: bool = False, ): \"\"\" --------------------------------------------------------------------------- Discretizes the", "[str]), (\"kernel\", kernel, [\"gaussian\", \"logistic\", \"sigmoid\", \"silverman\"]), (\"bandwidth\", bandwidth, [int,", "November 1993 the 3rd. plot_median: bool, optional If set to", "[ (\"breaks\", breaks, [list]), (\"labels\", labels, [list]), (\"include_lowest\", include_lowest, [bool]),", "10): \"\"\" --------------------------------------------------------------------------- Returns the n largest vColumn elements. Parameters", "self.alias, name ) ) return self.parent # ---# def aggregate(self,", "self.alias, self.parent.__genSQL__(), self.alias, where, ) result = executeSQL( query=query, title=\"Computing", "of discretization using the method 'same_freq'\" ) count = self.count()", ": standard error of the mean skewness : skewness sum", "to sort the data when using TS methods. Returns -------", ": Freedman Diaconis [2 * IQR / n ** (1", "use to impute the missing values. auto : Mean for", "A float between 0 and 1 that represents the quantile.", "1 (1 meaning global outlier). \"\"\" check_types( [ (\"alpha\", alpha,", "decimal, warnings, datetime from collections.abc import Iterable from typing import", "to the upper bound itself. Parameters ---------- lower: float, optional", "columns=[self.alias], unique=False ) .transpose() .values[self.alias] ) result = [distinct_count] +", ") return self.parent # ---# def fillna( self, val=None, method:", "self.category()) ] self.parent[copy_name].transformations += [(func, ctype, category)] self.parent[copy_name].catalog = self.catalog", "vDataFrame self.parent See Also -------- vDataFrame.fill_outliers : Fills the outliers", "the Z-Score (avg and std). (x - avg) / std", "5): \"\"\" --------------------------------------------------------------------------- Returns the tail of the vColumn. Parameters", "USING PARAMETERS percentile = 0.5) AS 'approx_50%', APPROXIMATE_PERCENTILE ({3}{4} USING", "for i in range(parameters[\"n_estimators\"]) ] query = \"SELECT split_value FROM", "(\"threshold\", threshold, [int, float]), ] ) if use_threshold: result =", "vColumn memory usage (byte) See Also -------- vDataFrame.memory_usage : Returns", "For more information, see utilities.tablesample. See Also -------- vDataFrame[].head :", "trans = (\"{} || ''\", \"varchar\", \"text\") if (self.isnum() and", "\"{}\" ) ) return self.parent # ---# def fillna( self,", "if self.parent[response].category() == \"float\": model = RandomForestRegressor(tmp_model_name) else: model =", "method: str, optional Method to use to compute the optimal", "self.alias) return to_tablesample(query, title=title) # ---# def numh(self, method: str", "Interval width of the bar. If empty, an optimized h", "the values higher than the upper bound to the upper", "\"MEDIAN\": fun = \"APPROXIMATE_MEDIAN\" query = \"SELECT {}, {}({}) FROM", "self.distinct() if distinct_elements not in ([0, 1], [1, 0]) or", "vColumn to use to partition the data. 
h: float, optional", "== None: self.catalog[elem] = None elif method == \"robust_zscore\": self.catalog[elem]", "method == \"robust_zscore\": self.catalog[\"median\"] = 0 self.catalog[\"mad\"] = 1 /", "val, [list])]) val = {self.alias: val} return self.parent.isin(val) # ---#", "optional The vColumn to use to compute the aggregation. max_cardinality:", "val) elif (len(by) == 1) and (self.parent[by[0]].nunique() < 50): try:", "self.alias, avg, \"NULLIFZERO\" if (nullifzero) else \"\", stddev ) else:", "Parameters ---------- n: int Number of digits to keep after", "\"\"\" --------------------------------------------------------------------------- Returns the category of the vColumn. The category", "to extract. It must be one of the following: CENTURY", "at least one element to use to order the data\"", "index.stop index_start = index.start if not (isinstance(index_start, int)): index_start =", "Box Plot of the input vColumns. \"\"\" if isinstance(cat_priority, str)", "Outliers. \"\"\" if isinstance(by, str): by = [by] check_types( [", "= {}, response: str = \"\", return_enum_trans: bool = False,", "no mode (only missing values).\\nNothing was filled.\".format( self.alias ) warnings.warn(warning_message,", ": Uses popular numerical aggregations during the computation. max_cardinality: int,", "numerical , 'categorical' otherwise. categorical : Uses only categorical aggregations", ") cmin = \"DECODE({}, {}, NULL)\".format( by[0], \", \".join( [", "See Also -------- vDataFrame[].str_contains : Verifies if the regular expression", "absolute value function to the input vColumn. Returns ------- vDataFrame", "spider plot of the input vColumn based on an aggregation.", "query = \"SELECT {}, {}({}) FROM {} GROUP BY {};\".format(", "[0], color=updated_dict(param, style_kwds, idx)[\"color\"], lw=4, ), ] ax.set_title(\"KernelDensity\") ax.legend( custom_lines,", "and (numcol != \"\"): numcol = self.parent.format_colnames(numcol) assert self.parent[numcol].category() in", "= \"\", max_cardinality: int = 6, h: float = 0,", "offset and a limit). Parameters ---------- limit: int, optional Number", "nbins: int = -1, k: int = 6, new_category: str", "add_history: self.parent.__add_to_history__( \"[Drop]: vColumn {} was deleted from the vDataFrame.\".format(self.alias)", "the final generated SQL code. Note: Dropping a vColumn can", "= [], return_trans: bool = False ): \"\"\" --------------------------------------------------------------------------- Normalizes", "is date. \"\"\" return self.category() in (\"float\", \"int\") # ---#", "vdf_table AS (SELECT * FROM {}) (SELECT AVG({}) FROM vdf_table", "---------- method: str, optional The describe method. auto : Sets", "data when using TS methods. Returns ------- vDataFrame self.parent See", "in range(len(distinct_elements)): expr += [ \"'{}', {}\".format(str(distinct_elements[k]).replace(\"'\", \"''\"), k) ]", "by[0] ), method=\"fetchall\", ) cmin = \"DECODE({}, {}, NULL)\".format( by[0],", "the input vColumn. Returns ------- vDataFrame self.parent See Also --------", "will be considered as an outlier. use_threshold: bool, optional Uses", "the Apache License, Version 2.0 (the \"License\"); # You may", "of the vColumn 'of' (ex: 50% to get the median).", "the specified name. 
\\u26A0 Warning : SQL code generation will", "float]), (\"dropna\", dropna, [bool])]) topk = \"\" if (k <", "See Also -------- vDataFrame[].decode : Encodes the vColumn with user", "NULL END)\".format( \"{}\", p_alpha, p_1_alpha, \"{}\" ) ) elif method", "result = executeSQL( \"SELECT {}, AVG({}), STDDEV({}) FROM {} GROUP", "None else \"NULL\", ) for elem in result if elem[1]", "vColumn_min) / int(math.floor(math.log(count, 2) + 2)), 1e-99, ) fd =", "+= self.parent.shape()[0] limit = index_stop - index_start if limit <=", "Labels used to name the new categories. If empty, names", "hist(self, method, of, max_cardinality, nbins, h, ax=ax, **style_kwds) # ---#", "empty, an optimized h will be computed. pie_type: str, optional", "\"auto\": pre_comp = self.parent.__get_catalog_value__(self.alias, \"numh\") if pre_comp != \"VERTICAPY_NOT_PRECOMPUTED\": return", "using the floor of the slicing instead of the ceiling.", "xlim_tmp = [] model = KernelDensity( name, bandwidth=bandwidth, kernel=kernel, nbins=nbins,", "[item for sublist in query_result for item in sublist] #", "method=\"numerical\", columns=[self.alias], unique=False ) .transpose() .values[self.alias] ) count, vColumn_min, vColumn_025,", "else \"\", stddev ), \"float\", \"float\", ) ] elif method", "optional Maximum number of vColumn distinct elements to be used", "the head of the vColumn. vDataFrame[].tail : Returns the tail", "pre-computed aggregations. parent, vDataFrame : Parent of the vColumn. transformations,", "outlier). \"\"\" check_types( [ (\"alpha\", alpha, [int, float]), (\"use_threshold\", use_threshold,", "use to determine if the vColumn will be considered as", "nb where = \"WHERE _verticapy_row_nb_ IN ({})\".format( \", \".join([\"1\"] +", "------- float var See Also -------- vDataFrame.aggregate : Computes the", "/ self.parent.shape()[0] ) for elem in sauv: if \"top\" in", "dummies. prefix_sep: str, optional Prefix delimitor of the dummies. drop_first:", "the input vColumn. \"\"\" check_types([(\"x\", x, [int, float])]) if self.isdate():", "AS pt_non_events, events / NULLIFZERO(SUM(events) OVER ()) AS pt_events FROM", "\"std\", \"min\", \"approx_25%\", \"approx_50%\", \"approx_75%\", \"max\", ] if method !=", "time of the learning and scoring phases. xlim: tuple, optional", "({}) x GROUP BY 1\".format( self.alias, y, y, query, )", "vColumns, cast the other types to varchar. same_freq : Computes", "* FROM {}) (SELECT AVG({}) FROM vdf_table WHERE {} <", "= False if check: self.parent.are_namecols_in(column) column = self.parent.format_colnames(column) columns +=", "nunique(self, approx: bool = True): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn", "= 0 OR events = 0 THEN 0 ELSE ZEROIFNULL(LN(pt_non_events", "'var' (Variance). Returns ------- float var See Also -------- vDataFrame.aggregate", "the input Name. Returns ------- vDataFrame self.parent See Also --------", "GROUP BY {};\".format( by[0], fun, self.alias, self.parent.__genSQL__(), by[0] ) result", "functions. Returns ------- ax Matplotlib axes object See Also --------", "gathered together to create a new category : 'Others'. cat_priority:", "input type. \"\"\" check_types([(\"dtype\", dtype, [str])]) try: query = \"SELECT", "\\ \\-. \\ # \\ \\__| \\ \\_____\\ \\ \\_____\\", "\\ \\ \\-. \\ # \\ \\__| \\ \\_____\\ \\", "chart of the vColumn based on an aggregation. Parameters ----------", "a numerical column for each vColumn category. 
In this case,", "method, [\"sturges\", \"freedman_diaconis\", \"fd\", \"auto\"])] ) method = method.lower() if", "'topk'\" ) distinct = self.topk(k).values[\"index\"] trans = ( \"(CASE WHEN", "\"top\" in elem: if \"percent\" in elem: self.catalog[elem] = sauv[elem]", "the bar. If empty, an optimized h will be computed.", "of the slicing. step: int Size of the slicing. Returns", "(\"same_width\", \"auto\"): if not (h) or h <= 0: if", "\"\"\" check_types([(\"lower\", lower, [float, int]), (\"upper\", upper, [float, int])]) assert", "Interval width if the vColumn is numerical or of type", "the already computed aggregations to gain in performance. The catalog", "method = \"mean\" if (self.isnum() and self.nunique(True) > 6) else", "use to fill the vColumn outliers. mean : Replaces the", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", ": Encodes the vColumn with One-Hot Encoding. \"\"\" check_types([(\"response\", response,", ") else: query = \"SELECT PERCENTILE_CONT({}) WITHIN GROUP (ORDER BY", "License. # You may obtain a copy of the License", "decimal.Decimal): values[elem][i] = float(values[elem][i]) return tablesample(values) # ---# def discretize(", "{}' to '{}'\".format( e, func.replace(\"{}\", \"x\"), self.alias.replace('\"', \"\") ) )", "max FROM {}\".format( self.alias, self.alias, self.alias, self.alias, self.alias, table )", "Draws the Time Series of the vColumn. Parameters ---------- ts:", "to the vColumn {}\\n{} feature{}created: {}\".format( self.alias, len(all_new_features), conj, \",", "Union[int, float, tuple] = (None, None), ax=None, **style_kwds, ): \"\"\"", "\"{}\", p_alpha, mean_alpha, \"{}\", p_1_alpha, mean_1_alpha, \"{}\" ) ) return", "(No h will be picked or computed) ax: Matplotlib axes", "( \"func\", func, [ \"abs\", \"acos\", \"asin\", \"atan\", \"cbrt\", \"ceil\",", "computed if the parameter is empty or invalid. max_cardinality: int/tuple,", "self.clip(lower=p_alpha, upper=p_1_alpha) elif method == \"null\": self.apply( func=\"(CASE WHEN ({}", "return pie( self, method, of, max_cardinality, h, donut, rose, ax=None,", "(PARTITION BY {})\".format( self.alias, \", \".join(by) ), \"MIN({}) OVER (PARTITION", "float mad See Also -------- vDataFrame.aggregate : Computes the vDataFrame", "be considered as an outlier. use_threshold: bool, optional Uses the", ": Computes the vDataFrame input aggregations. \"\"\" return self.aggregate([\"skewness\"]).values[self.alias][0] skew", "({} BETWEEN {} AND {}) THEN {} ELSE NULL END)\".format(", "prefix_sep: str = \"_\", drop_first: bool = True, use_numbers_as_suffix: bool", "generated will look like: even: CASE ... WHEN vColumn =", "for elem in self.catalog: sauv[elem] = self.catalog[elem] self.parent.__update_catalog__(erase=True, columns=[self.alias]) try:", "= \"\" query = \"(SELECT {} FROM {}{} OFFSET {}{})", "\"AVG\" if by == []: if fun == \"AVG\": val", "import KernelDensity schema = verticapy.options[\"temp_schema\"] if not (schema): schema =", "--------------------------------------------------------------------------- Aggregates the vColumn using 'mad' (median absolute deviation). Returns", "or one of its affiliates. # Licensed under the Apache", "(must be > 1) Returns ------- tablesample An object containing", "h > 0.0001: h = round(h, 4) elif h >", ": Returns the n largest vColumn elements. \"\"\" check_types([(\"n\", n,", "= \"public\" name = gen_tmp_name(schema=schema, name=\"kde\") if isinstance(xlim, (tuple, list)):", "(Average Absolute Deviation). 
Returns ------- float aad See Also --------", "result = executeSQL( query=query, title=\"Computing the equal frequency histogram bins.\",", "nbins=nbins, k=nbins, new_category=\"Others\", return_enum_trans=True, )[0].replace(\"{}\", self.alias) query = \"SELECT {}", "{} named {} was added to the vDataFrame.\".format( self.alias, name", "aggregations. \"\"\" return self.aggregate([\"count\"]).values[self.alias][0] # ---# def cut( self, breaks:", "ax=None, **style_kwds, ): \"\"\" --------------------------------------------------------------------------- Draws the pie chart of", "raise ConversionError( \"{}\\nThe vColumn {} can not be converted to", "input vColumn. \"\"\" check_types([(\"x\", x, [int, float])]) if self.isdate(): return", "(function, type, category) parent: vDataFrame, optional Parent of the vColumn.", "numcol: str = \"\" ): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn", "PARAMETERS percentile = 0.25) AS Q1, APPROXIMATE_PERCENTILE({} USING PARAMETERS percentile", "limits of the current axes. ax: Matplotlib axes object, optional", "NAs, MIN({}) AS min, APPROXIMATE_PERCENTILE({} USING PARAMETERS percentile = 0.25)", "quantiles used to draw the Plot. start_date: str / date,", "in relation to the dependent variable. Parameters ---------- y: str", "elements). Returns ------- int number of non-Missing elements. See Also", "tablesample(values) # ---# def value_counts(self, k: int = 30): \"\"\"", "vDataFrame[].isnum : Returns True if the vColumn is numerical. \"\"\"", "Encoding. vDataFrame[].discretize : Discretizes the vColumn. vDataFrame[].label_encode : Encodes the", "\"count\": self.parent.shape()[0], \"percent\": 100.0, \"unique\": 2, \"approx_unique\": 2, \"prod\": 0,", "approx: bool, optional If set to True, the approximate median", "None) or (upper != None), ParameterError( \"At least 'lower' or", "Name of the copy. Returns ------- vDataFrame self.parent See Also", "the floor of the slicing instead of the ceiling. Returns", "the length of parameter 'labels' + 1 or parameter 'labels'", ": Adds a new vColumn labeled with 0 and 1", "self.avg() elif fun == \"MEDIAN\": val = self.median() new_column =", "method in [\"ffill\", \"pad\", \"bfill\", \"backfill\"]: all_partition += [elem for", "picked or computed) nbins: int, optional Number of bins. If", ") assert response, ParameterError( \"Parameter 'response' can not be empty", "= \"<=\", \"<\", \"[\", \"[\" if idx == 0 and", "elif (method == \"cat_stats\") and (numcol != \"\"): numcol =", "The new vColumn alias. Returns ------- vDataFrame self.parent See Also", "{})\".format(x, \"{}\")) else: return self.apply(func=\"{} - ({})\".format(\"{}\", x)) # ---#", "Parameters ---------- pat: str Regular expression. Returns ------- vDataFrame self.parent", "pie chart. auto : Regular pie chart. donut : Donut", "Parameters ---------- argv: object Any amount of expressions. 
The expression", "RandomForestClassifier(tmp_model_name) model.set_params({\"n_estimators\": 20, \"max_depth\": 8, \"nbins\": 100}) model.set_params(RFmodel_params) parameters =", "= str(func) check_types([(\"func\", func, [str]), (\"copy_name\", copy_name, [str])]) try: try:", "best_h = max(sturges, fd) self.parent.__update_catalog__({\"index\": [\"numh\"], self.alias: [best_h]}) if self.category()", "# ---# def label_encode(self): \"\"\" --------------------------------------------------------------------------- Encodes the vColumn using", "] force_columns.remove(self.alias) executeSQL( \"SELECT * FROM {} LIMIT 10\".format( self.parent.__genSQL__(force_columns=force_columns)", "hot encoder was applied to the vColumn {}\\n{} feature{}created: {}\".format(", "self.parent.are_namecols_in(of) of = self.parent.format_colnames(of) from verticapy.plot import spider as spider_plot", "sqrt : arithmetic square root tan : trigonometric tangent tanh", "next whole number cos : trigonometric cosine cosh : hyperbolic", "(\"mean\", \"avg\", \"median\"): fun = \"MEDIAN\" if (method == \"median\")", "int = 10): \"\"\" --------------------------------------------------------------------------- Computes the Information Value (IV)", "standard error of the mean skewness : skewness sum :", "\" was \" if verticapy.options[\"print_info\"]: print(\"{} element{}filled.\".format(total, conj)) self.parent.__add_to_history__( \"[Fillna]:", "= \", \".join([quote_ident(column) + desc for column in order_by]) new_column", "is numerical. \"\"\" return self.category() == \"date\" # ---# def", "Set the x limits of the current axes. ax: Matplotlib", "nbins <= 0: h = self.numh() else: h = (self.max()", "list, labels: list = [], include_lowest: bool = True, right:", "== \"MEDIAN\": val = self.median() new_column = \"COALESCE({}, {})\".format(\"{}\", val)", "!= None else \"NULL\", elem[2] if elem[2] != None else", "vdf_table\"\"\".format( category, self.alias, self.parent.shape()[0], numcol, cast, ) tmp_query += (", "unique=False ) .transpose() .values[self.alias] ) count, vColumn_min, vColumn_025, vColumn_075, vColumn_max", "current vColumn and creating a copy with the specified name.", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "records. The vColumn will be transformed. Parameters ---------- pat: str", "idx, elem in enumerate(result): result[idx][0] = ( \"NULL\" if (elem[0]", "the geopandas plot function. For more information, see: https://geopandas.readthedocs.io/en/latest/docs/reference/api/ geopandas.GeoDataFrame.plot.html", "the function's performance can drastically decrease. Returns ------- float/str median", "in range(parameters[\"n_estimators\"]) ] query = \"SELECT split_value FROM (SELECT split_value,", "prefix: str, optional Prefix of the dummies. prefix_sep: str, optional", "the average of the response partitioned by the different vColumn", "input aggregations. \"\"\" check_types([(\"x\", x, [int, float], (\"approx\", approx, [bool]))])", "Parameters ---------- method: str, optional The method to use to", "), \"varchar\", \"text\", ) else: trans = (\"FLOOR({}) || ''\",", "= self.aggregate(func=[\"std\", \"avg\"]).transpose().values p_alpha, p_1_alpha = ( -threshold * result[\"std\"][0]", "\"value\": [self.alias, self.ctype()] + result, } if ((is_date) and not", "number of non-Missing elements. See Also -------- vDataFrame.aggregate : Computes", "\"\"\" check_types( [ (\"by\", by, [str]), (\"kernel\", kernel, [\"gaussian\", \"logistic\",", "vColumn using 'sum'. 
Returns ------- float sum See Also --------", "str Regular expression. Returns ------- vDataFrame self.parent See Also --------", "algorithm. Parameters ---------- prefix: str, optional Prefix of the dummies.", "works on numerical / date-like vColumns.\" ) assert len(breaks) >=", "else False rose = True if pie_type == \"rose\" else", "spider_plot return spider_plot( self.parent, columns, method, of, max_cardinality, h, ax=ax,", "of the vColumn 'of'. sum : Sum of the vColumn", "draw a Step Plot. ax: Matplotlib axes object, optional The", "required by applicable law or agreed to in writing, software", "Version 2.0 (the \"License\"); # You may not use this", "THEN {} \".format(\"{}\", upper, upper) if (isinstance(upper, (float, int))) else", "/ / / # \\ / / / # \\/", "Table. \"\"\" check_types([(\"y\", y, [str]), (\"nbins\", nbins, [int])]) self.parent.are_namecols_in(y) y", "axes object, optional The axes to plot on. **style_kwds Any", "iloc(self, limit: int = 5, offset: int = 0): \"\"\"", "Number of most occurent elements to return. dropna: bool, optional", "(\"cat_priority\", cat_priority, [list]), ] ) if by: self.parent.are_namecols_in(by) by =", "{})\".format( self.alias, \", \".join(by) ), ) else: avg, stddev =", "variable in relation to the dependent variable. Parameters ---------- y:", "the vDataFrame input aggregations. \"\"\" check_types([(\"x\", x, [int, float], (\"approx\",", "# # # Modules # # Standard Python Modules import", "\"[Get Dummies]: One hot encoder was applied to the vColumn", "agreed to in writing, software # distributed under the License", "the input vColumn. \"\"\" check_types([(\"x\", x, [int, float])]) return self.apply(func=\"{}", ": Extracts a specific TS field from the vColumn. \"\"\"", "Parameters ---------- approx: bool, optional If set to True, the", "using an input 'quantile'. Parameters ---------- x: float A float", "0: if return_trans: return \"({} - {}) / ({})\".format(self.alias, med,", "[self.alias, by] else: columns = [self.alias] if of: self.parent.are_namecols_in(of) of", "+ \".\" ) return self.parent one_hot_encode = get_dummies # ---#", "True if the vColumn is boolean. vDataFrame[].isnum : Returns True", "**style_kwds, ) # ---# def product(self): \"\"\" --------------------------------------------------------------------------- Aggregates the", "one vColumn can only have one parent. catalog: dict, optional", "rule. \"\"\" return self.apply(func=\"DATE_PART('{}', {})\".format(field, \"{}\")) # ---# def decode(self,", "Evidence (WOE) Table. It tells the predictive power of an", "in the vColumn. You can write the following list: [\"Fouad\",", "record of the vColumn. vDataFrame[].str_slice : Slices the vColumn. \"\"\"", "elif (len(by) == 1) and (self.parent[by[0]].nunique() < 50): try: if", "bool, optional If set to True, the approximate cardinality is", "\"\"\" --------------------------------------------------------------------------- Draws the histogram of the vColumn based on", "(nullifzero) else \"\", cmax, cmin, ), \"float\", \"float\", ) ]", "time = '03-11-1993' will filter the data when 'ts' is", "Applies a function to the input vColumn. \"\"\" check_types([(\"x\", x,", "Parameters ---------- dtype: str New type. Returns ------- vDataFrame self.parent", "than quantile(alpha) or greater than quantile(1-alpha) will be filled. 
Returns", "to the next whole number cos : trigonometric cosine cosh", "= ( 100 * sauv[\"count\"] / self.parent.shape()[0] ) except: pass", "func ) ): max_floor = max( len(self.parent[column].transformations), max_floor ) except:", "Jarque-Bera index mad : median absolute deviation max : maximum", "popular numerical aggregations during the computation. max_cardinality: int, optional Cardinality", "{}\".format( self.alias, self.alias, self.alias, self.alias, self.alias, table ) result =", "numerical. by: str, optional vColumn to use to partition the", "specific TS field from the vColumn (only if the vColumn", "1, 0)\".format( \"{}\", str(distinct_elements[k]).replace(\"'\", \"''\") ) transformations = self.transformations +", "Input End Date. For example, time = '03-11-1993' will filter", "by evaluating some conditions. vDataFrame[].discretize : Discretizes the vColumn. vDataFrame[].label_encode", "10} to train a Random Forest with 20 trees and", "Returns ------- vDataFrame self.parent See Also -------- vDataFrame.fill_outliers : Fills", "Write {\"n_estimators\": 20, \"max_depth\": 10} to train a Random Forest", "vColumn. \"\"\" check_types([(\"n\", n, [int, float])]) return self.apply(func=\"ROUND({}, {})\".format(\"{}\", n))", "# ---# def tail(self, limit: int = 5): \"\"\" ---------------------------------------------------------------------------", "the vDataFrame input aggregations. \"\"\" check_types([(\"dropna\", dropna, [bool]), (\"n\", n,", "float std See Also -------- vDataFrame.aggregate : Computes the vDataFrame", "different categories {} to normalize.\".format( by[0] ), method=\"fetchall\", ) cmin", "product See Also -------- vDataFrame.aggregate : Computes the vDataFrame input", "Fills the vColumn outliers using the input method. \"\"\" check_types([(\"lower\",", "order_by: list, optional List of the vColumns to use to", "The method to use to discretize the vColumn. auto :", "elif method == \"zscore\": self.catalog[\"mean\"] = 0 self.catalog[\"std\"] = 1", "except: avg, stddev = ( \"AVG({}) OVER (PARTITION BY {})\".format(", "the threshold, it will be considered as an outlier. use_threshold:", "\"\"\" import sys total = ( sys.getsizeof(self) + sys.getsizeof(self.alias) +", "utilities.tablesample. See Also -------- vDataFrame.aggregate : Computes the vDataFrame input", "------- float std See Also -------- vDataFrame.aggregate : Computes the", "function to the input vColumn. \"\"\" check_types([(\"x\", x, [int, float])])", "PERCENTILE_CONT({}) WITHIN GROUP (ORDER BY {}) OVER (), PERCENTILE_CONT(1 -", "list, optional List of the different categories to consider when", "for categorical variables.\" ) warnings.warn(warning_message, Warning) else: distinct_elements = self.distinct()", "method in (\"mean\", \"avg\", \"median\"): fun = \"MEDIAN\" if (method", "to display. Returns ------- tablesample An object containing the result.", "= \"The method 'robust_zscore' is available only if the parameter", "vColumn to use to order the data. The vColumn type", "= True, use_numbers_as_suffix: bool = False, ): \"\"\" --------------------------------------------------------------------------- Encodes", "best_h # ---# def nunique(self, approx: bool = True): \"\"\"", "if elem[2] != None ] ), ) executeSQL( \"SELECT {},", "b). Returns ------- int vColumn expected store usage. 
See Also", "): \"\"\" --------------------------------------------------------------------------- Fills missing elements in the vColumn with", "try: if fun == \"MEDIAN\": fun = \"APPROXIMATE_MEDIAN\" query =", "{} AND {} THEN '[{};{}]' \".format( \"{}\", result[i - 1],", "ffill : Propagation of the first element (Constant Interpolation). mean", ": Slices the vColumn. \"\"\" check_types([(\"to_replace\", to_replace, [str]), (\"value\", value,", "Returns True if the vColumn category is date. \"\"\" return", "nullifzero, n = 1, len(by) if self.isbool(): warning_message = \"Normalize", "= self.alias return tail # ---# def isbool(self): \"\"\" ---------------------------------------------------------------------------", "most occurent element percent : percent of non-missing elements q%", "kernel. sigmoid : Sigmoid kernel. silverman : Silverman kernel. nbins:", ": Returns the vDataFrame memory usage. \"\"\" import sys total", "response_cat = self.parent[y].distinct() response_cat.sort() assert response_cat == [0, 1], TypeError(", "< 1) else \"LIMIT {}\".format(k) dropna = \" WHERE {}", "n - 1] (n being the vColumn cardinality). Returns -------", "after the comma. Returns ------- vDataFrame self.parent See Also --------", "\"\", stddev ) else: final_transformation = [ ( \"({} -", "number raised to the power of another number round :", "\"x\"), ) ) else: for k in range(max_floor): self.transformations +=", "model.drop() raise # ---# def describe( self, method: str =", "See Also -------- vDataFrame.add_copy : Creates a copy of the", "\", \".join(all_new_features) ) + \".\" ) return self.parent one_hot_encode =", "if by == []: if fun == \"AVG\": val =", "vColumn bar width. Parameters ---------- method: str, optional Method to", "Dictionary of the Random Forest model parameters used to compute", "data. method: str, optional The method to use to aggregate", "dropping the current vColumn and creating a copy with the", "= 0.25) AS Q1, APPROXIMATE_PERCENTILE({} USING PARAMETERS percentile = 0.75)", "numcol = self.parent.format_colnames(numcol) assert self.parent[numcol].category() in (\"float\", \"int\"), TypeError( \"The", "columns = [self.alias, by] else: columns = [self.alias] if of:", "count : Number of elements. density : Percentage of the", "= \"COALESCE({}, {})\".format(\"{}\", expr) elif method == \"0ifnull\": new_column =", "OR CONDITIONS OF ANY KIND, either express or implied. #", "rp] query = \"WITH vdf_table AS (SELECT * FROM {})", "elements in the vColumn with a user-specified rule. Parameters ----------", "LIMIT {}\".format(limit) else: limit = \"\" query = \"(SELECT {}", "Gaussian distribution to define the outliers. After normalizing the data", "= \"Can not normalize {} using a Z-Score - The", "END)\".format( bin_spatial_to_str(self.category()), \", \".join( [ \"'{}'\".format(str(elem).replace(\"'\", \"''\")) for elem in", "be generated. include_lowest: bool, optional If set to True, the", "of the following: date / int / float / text", "[\"mode\", \"0ifnull\"]: max_floor = 0 all_partition = by if method", "= 0.75) AS Q3, MAX({}) AS max FROM {}\".format( self.alias,", "drop(tmp_view_name, method=\"view\") self.parent.to_db(tmp_view_name) from verticapy.learn.ensemble import ( RandomForestClassifier, RandomForestRegressor, )", "Random Forest with 20 trees and a maximum depth of", "self.alias, y, y, query, ) query = \"SELECT {}, ord,", "tail.offset = offset tail.dtype[self.alias] = self.ctype() tail.name = self.alias return", "utilities.tablesample. 
See Also -------- vDataFrame.analytic : Adds a new vColumn", "/ date, optional Input End Date. For example, time =", ") # ---# def str_replace(self, to_replace: str, value: str =", "), \"apply_test_feature\", ) except: ctype = get_data_types( \"SELECT {} AS", "if n == 0: nullifzero = 0 cmin, cmax =", "# |____/ / / # _____________ / / # \\", "Returns the head of the vColumn. \"\"\" return self.iloc(limit=limit, offset=-1)", "val: list, *args): \"\"\" --------------------------------------------------------------------------- Looks if some specific records", "See Also -------- vDataFrame.outliers : Computes the vDataFrame Global Outliers.", "'of'. max : Maximum of the vColumn 'of'. sum :", "ISOWEEK / ISOYEAR / MICROSECONDS / MILLENNIUM / MILLISECONDS /", "the mean encoding of a response. \"\"\" check_types( [ (\"prefix\",", "is set to 'smart'. return_enum_trans: bool, optional Returns the transformation", "density unique : cardinality (count distinct) var : variance Other", "optional parameter to pass to the geopandas plot function. For", "name: str): \"\"\" --------------------------------------------------------------------------- Adds a copy vColumn to the", ") return executeSQL( query=query, title=\"Getting the vColumn element.\", method=\"fetchfirstelem\", )", "and built-in analytics and machine learning features. It supports the", ": number of non-missing elements cvar : conditional value at", "# # ---# class vColumn(str_sql): \"\"\" --------------------------------------------------------------------------- Python object which", "{} \".format(\"{}\", lower, lower) if (isinstance(lower, (float, int))) else \"\"", "exponential function floor : value down to the next whole", "of one of the dummies ({name}).\\n\" \"It can be the", "Time Series of the vColumn. Parameters ---------- ts: str TS", "Step Plot. ax: Matplotlib axes object, optional The axes to", "\"start_date\", start_date, [str, datetime.datetime, datetime.date, int, float], ), ( \"end_date\",", "step: int): \"\"\" --------------------------------------------------------------------------- Slices the vColumn. The vColumn will", "pat, [str])]) return self.apply( func=\"REGEXP_COUNT({}, '{}')\".format(\"{}\", pat.replace(\"'\", \"''\")) ) #", "to the following: (function, type, category) parent: vDataFrame, optional Parent", "as e: raise ConversionError( \"{}\\nThe vColumn {} can not be", "(1.0 / 3.0), 1e-99) if method.lower() == \"sturges\": best_h =", "regular expression is in each of the vColumn records. vDataFrame[].extract", "] text_info += \"\\t{} => {}\".format(distinct_elements[k], k) expr = \",", "= self.aggregate([\"count\", \"min\", \"max\"]) index = result.values[\"index\"] result = result.values[self.alias]", "= 10): \"\"\" --------------------------------------------------------------------------- Returns the n smallest elements in", "str, optional The vColumn to use to compute the aggregation.", "FROM (SELECT split_value, MAX(weighted_information_gain) FROM ({}) VERTICAPY_SUBTABLE WHERE split_value IS", "\"or simply because of ambiguous columns naming.\\nBy changing one of", "Number representing the outliers threshold. 
Values lesser than quantile(alpha) or", "was \" self.parent.__add_to_history__( \"[Get Dummies]: One hot encoder was applied", "int/tuple, optional Maximum number of distinct elements for vColumns 1", "all_partition: if len(self.parent[elem].transformations) > max_floor: max_floor = len(self.parent[elem].transformations) max_floor -=", "method in (\"mean\", \"median\") or isinstance(val, float): category, ctype =", "the func 'x -> {}' to '{}'\".format( e, func.replace(\"{}\", \"x\"),", "the vColumn type is date like). The vColumn will be", "category will be one of the following: date / int", "{}{} FROM {}{} OFFSET {} LIMIT 1\".format( self.alias, cast, self.parent.__genSQL__(),", "optional List of the vColumns to use to sort the", "(\"nbins\", nbins, [int, float]), ] ) if of: self.parent.are_namecols_in(of) of", ": Computes the vDataFrame input aggregations. \"\"\" return self.aggregate([\"aad\"]).values[self.alias][0] #", "difference between the max and the min sem : standard", ": Slices the vColumn using a time series rule. \"\"\"", "\"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'var' (Variance). Returns -------", "Also -------- vDataFrame[].drop_outliers : Drops outliers in the vColumn. vDataFrame.outliers", "may not use this file except in compliance with the", "CENTURY / DAY / DECADE / DOQ / DOW /", "= self.parent.format_colnames(y) assert self.parent[y].nunique() == 2, TypeError( \"vColumn {} must", "vColumn outliers. mean : Replaces the upper and lower outliers", "= [cat_priority] check_types( [ (\"by\", by, [str]), (\"max_cardinality\", max_cardinality, [int,", "method. Parameters ---------- method: str, optional The method to use", "Returns ------- vDataFrame self.parent See Also -------- vDataFrame.eval : Evaluates", "\"mean\", \"avg\", \"median\", \"ffill\", \"pad\", \"bfill\", \"backfill\", ], ), (\"expr\",", "return \"({} - {}) / {}({})\".format( self.alias, avg, \"NULLIFZERO\" if", "\".format(\"{}\", lower, lower) if (isinstance(lower, (float, int))) else \"\" )", "\"max\", ] if method != \"cat_stats\": values = { \"index\":", "pat: str): \"\"\" --------------------------------------------------------------------------- Verifies if the regular expression is", "simple: instead of moving # data around for processing, VerticaPy", "\"\"\" return self.ctype().lower() in (\"bool\", \"boolean\") # ---# def isdate(self):", "* result[\"std\"][0] + result[\"avg\"][0], ) else: query = \"SELECT PERCENTILE_CONT({})", "can drastically decrease. Returns ------- float/str median See Also --------", "response_cat == [0, 1], TypeError( \"vColumn {} must be binary", "values. auto : Mean for the numerical and Mode for", "{}, AVG({}), STDDEV({}) FROM {} GROUP BY {}\".format( by[0], self.alias,", "response. \"\"\" import verticapy.stats as st return self.apply(func=st.decode(str_sql(\"{}\"), *argv)) #", "conditional value at risk dtype : vColumn type iqr :", "str Response vColumn. Returns ------- vDataFrame self.parent See Also --------", "min) by: list, optional vColumns used in the partition. 
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# VerticaPy is a Python library with scikit-like functionality for conducting
# data science projects on data stored in Vertica, taking advantage of Vertica's
# speed and built-in analytics and machine learning features. It supports the
# entire data science life cycle, uses a ‘pipeline’ mechanism to sequentialize
# data transformation operations, and offers beautiful graphical options. The
# idea is simple: instead of moving data around for processing, VerticaPy
# brings the logic to the data.
#
#
# Modules
#
# Standard Python Modules
import math, re, decimal, warnings, datetime
from collections.abc import Iterable
from typing import Union

# VerticaPy Modules
import verticapy
from verticapy.utilities import *
from verticapy.toolbox import *
from verticapy.errors import *


# ---#
class vColumn(str_sql):
    """
---------------------------------------------------------------------------
Python object that stores all user transformations. If the vDataFrame
represents the entire relation, the vColumn can be seen as one column of
that relation. vColumns simplify several processes with their abstractions.

Parameters
----------
alias: str
    vColumn alias.
transformations: list, optional
    List of the different transformations. Each transformation must be
    similar to the following: (function, type, category)
parent: vDataFrame, optional
    Parent of the vColumn. One vDataFrame can have multiple children
    vColumns, whereas one vColumn can only have one parent.
catalog: dict, optional
    Catalog where each key corresponds to an aggregation. vColumns memorize
    the already computed aggregations to gain in performance. The catalog
    will be updated when the parent vDataFrame is modified.

Attributes
----------
    alias, str           : vColumn alias.
    catalog, dict        : Catalog of pre-computed aggregations.
    parent, vDataFrame   : Parent of the vColumn.
    transformations, str : List of the different transformations.
    """

    #
    # Special Methods
    #

    # ---#
    def __init__(
        self, alias: str, transformations: list = [], parent=None, catalog: dict = {}
    ):
        self.parent, self.alias, self.transformations = (
            parent,
            alias,
            [elem for elem in transformations],
        )
        # self.catalog caches the pre-computed aggregations of the vColumn
        # (keys such as "regr_count", "regr_intercept", "regr_r2", "regr_slope",
        # "regr_sxx").

    # ---#
    def __repr__(self):
        return self.head(limit=verticapy.options["max_rows"]).__repr__()

    # ---#
    def _repr_html_(self):
        return self.head(limit=verticapy.options["max_rows"])._repr_html_()

    # ---#
    def __setattr__(self, attr, val):
        self.__dict__[attr] = val

    # ---#
    def __nonzero__(self):
        return self.count() > 0

    #
    # Methods
    #

    # ---#
    def aad(self):
        """
    ---------------------------------------------------------------------------
    Aggregates the vColumn using 'aad' (Average Absolute Deviation).

    Returns
    -------
    float
        aad

    See Also
    --------
    vDataFrame.aggregate : Computes the vDataFrame input aggregations.
        """
        return self.aggregate(["aad"]).values[self.alias][0]

    # ---#
    def abs(self):
        """
    ---------------------------------------------------------------------------
    Applies the absolute value function to the input vColumn.

    Returns
    -------
    vDataFrame
        self.parent

    See Also
    --------
    vDataFrame[].apply : Applies a function to the vColumn.
        """
        return self.apply(func="ABS({})")

    # ---#
    # The remaining methods of the class fall into the following groups:
    #   * aggregations: agg, aggregate, avg, count, describe, kurtosis, mad, max,
    #     median, min, mode, nlargest, nsmallest, nunique, product, quantile, sem,
    #     skewness, std, sum, topk, value_counts, var
    #   * transformations and encodings: add, add_copy, apply, apply_fun, astype,
    #     clip, cut, date_part, decode, discretize, div, drop, dropna,
    #     drop_outliers, fill_outliers, fillna, get_dummies, label_encode,
    #     mean_encode, mul, normalize, rename, round, slice, sub
    #   * string methods: str_contains, str_count, str_extract, str_replace,
    #     str_slice
    #   * plotting: bar, boxplot, density, geo_plot, hist, pie, plot, range_plot,
    #     spider
    #   * metadata and checks: category, ctype, distinct, dtype, head, iloc,
    #     isbool, isdate, isin, isnum, iv_woe, memory_usage, numh, store_usage,
    #     tail
vDataFrame[].discretize :", "nbins: int, optional Maximum number of nbins used for the", "---# def rename(self, new_name: str): \"\"\" --------------------------------------------------------------------------- Renames the vColumn", "value: str = \"\"): \"\"\" --------------------------------------------------------------------------- Replaces the regular expression", "Sets the method to 'numerical' if the vColumn is numerical", "close_l, close_r = \"<=\", \"<\", \"[\", \"[\" if idx ==", "method == \"minmax\": self.catalog[\"min\"] = 0 self.catalog[\"max\"] = 1 self.parent.__add_to_history__(", "self.parent.format_colnames(of) from verticapy.plot import hist return hist(self, method, of, max_cardinality,", "vColumn. One vDataFrame can have multiple children vColumns whereas one", "total # ---# def min(self): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn", "Encodes the vColumn with user defined Encoding. vDataFrame[].get_dummies : Encodes", "the mean encoding of a response. \"\"\" if self.category() in", "Returns the n smallest elements in the vColumn. Parameters ----------", "vColumn most occurent elements. \"\"\" if \"agg\" not in kwargs:", "axes object \"\"\" columns = [self.alias] check = True if", "sign sin : trigonometric sine sinh : hyperbolic sine sqrt", "data stored in Vertica, taking advantage Vertica’s # speed and", "Parameters ---------- dropna: bool, optional If set to True, NULL", "return trans else: self.transformations += [trans] sauv = {} for", "for elem in self.transformations] total = self.count() if method not", "be empty.\" ) conditions, column = [], self.alias for idx", "of the dummies. prefix_sep: str, optional Prefix delimitor of the", "optional Lower bound. upper: float, optional Upper bound. Returns -------", "the vColumn element.\", method=\"fetchfirstelem\", ) else: return getattr(self, index) #", "vDataFrame.aggregate : Computes the vDataFrame input aggregations. \"\"\" check_types([(\"dropna\", dropna,", "expected store usage (unit: b). Returns ------- int vColumn expected", "_verticapy_row_nb_ IN ({})\".format( \", \".join([\"1\"] + nth_elems + [str(count)]) )", "- index_start if limit <= 0: limit = 0 limit", "to True, the lowest element of the list will be", "= self.parent force_columns = [ column for column in self.parent._VERTICAPY_VARIABLES_[\"columns\"]", "max_floor = len(self.parent[response].transformations) - len( self.transformations ) for k in", "return self.iloc(limit=limit, offset=-1) # ---# def topk(self, k: int =", "< {}\".format( self.alias, result[\"avg\"][0], result[\"std\"][0], threshold ) ) else: p_alpha,", "ORDER BY 2 DESC LIMIT {}) VERTICAPY_SUBTABLE ORDER BY split_value::float\".format(", "'alpha' parameter. alpha: float, optional Number representing the outliers threshold.", "(or approximate cardinality). See Also -------- vDataFrame.aggregate : Computes the", "learning features. 
It supports the # entire data science life", "name[1:-1], new_vColumn) self.parent._VERTICAPY_VARIABLES_[\"columns\"] += [name] self.parent.__add_to_history__( \"[Add Copy]: A copy", "of discretization using the method 'smart'.\" ) self.parent.are_namecols_in(response) response =", "discretized.\".format(self.alias) ) return self.parent # ---# def distinct(self, **kwargs): \"\"\"", "vDataFrame self.parent See Also -------- vDataFrame.case_when : Creates a new", "ValueError(\"Division by 0 is forbidden !\") return self.apply(func=\"{} / ({})\".format(\"{}\",", "with {} as Response Column.\".format( self.alias, response ) ) if", "value function to the input vColumn. Returns ------- vDataFrame self.parent", "index < 0: index += self.parent.shape()[0] query = \"SELECT {}{}", "str(distinct_elements[k]).replace('\"', \"_\") ) ) name = ( name.replace(\" \", \"_\")", "'topk' method. new_category: str, optional The name of the merging", "non-missing elements cvar : conditional value at risk dtype :", "return self.aggregate([\"sem\"]).values[self.alias][0] # ---# def skewness(self): \"\"\" --------------------------------------------------------------------------- Aggregates the", "def str_contains(self, pat: str): \"\"\" --------------------------------------------------------------------------- Verifies if the regular", "new_vColumn) self.parent._VERTICAPY_VARIABLES_[\"columns\"] += [name] self.parent.__add_to_history__( \"[Add Copy]: A copy of", "of the vColumn 'of'. q% : q Quantile of the", "], ), (\"expr\", expr, [str]), (\"by\", by, [list]), (\"order_by\", order_by,", "TS. start_date: str / date, optional Input Start Date. For", "self.alias, dtype, self.alias, self.parent.__genSQL__(), self.alias ) executeSQL(query, title=\"Testing the Type", "{} for elem in self.catalog: sauv[elem] = self.catalog[elem] self.parent.__update_catalog__(erase=True, columns=[self.alias])", "the 'topk' method. new_category: str, optional The name of the", "\"\"\" --------------------------------------------------------------------------- Verifies if the regular expression is in each", "elem in self.catalog: sauv[elem] = self.catalog[elem] self.parent.__update_catalog__(erase=True, columns=[self.alias]) total =", "vColumn using 'skewness'. Returns ------- float skewness See Also --------", "\"\"\" return self.parent.aggregate(func=func, columns=[self.alias]).transpose() agg = aggregate # ---# def", "title = \"Reads {} {} largest elements.\".format(self.alias, n) return to_tablesample(query,", "Replaces the regular expression matches in each of the vColumn", "{} AND {})\".format(self.alias, p_alpha, p_1_alpha) ) return self.parent # ---#", "optional If set to True, the Median will be drawn.", "1) and (self.parent[by[0]].nunique() < 50): try: if fun == \"MEDIAN\":", "to normalize. zscore : Normalization using the Z-Score (avg and", "|_) # \\/ (/_| | |(_(_|| \\/ # / #", "float/str median See Also -------- vDataFrame.aggregate : Computes the vDataFrame", "of parameter breaks must be equal to the length of", "assert k >= 2, ParameterError( \"Parameter 'k' must be greater", "\"regr_avgx\": {}, \"regr_avgy\": {}, \"regr_count\": {}, \"regr_intercept\": {}, \"regr_r2\": {},", "else: query = \"SELECT {} FROM (SELECT {} AS {},", "bool, optional If set to True, NULL values will not", "vColumn. The aggregations used are the median and two input", "values will not be considered during the computation. 
Returns -------", ") warnings.warn(warning_message, Warning) else: distinct_elements = self.distinct() expr = [\"DECODE({}\"]", "= self.parent.format_colnames(response) assert self.parent[response].isnum(), TypeError( \"The response column must be", "rule. Parameters ---------- length: int Slice size. unit: str, optional", "to the data. # # # Modules # # Standard", "splits when 'method' is set to 'smart'. A RF Regressor", "bool = False, ax=None, **style_kwds, ): \"\"\" --------------------------------------------------------------------------- Draws the", "cbrt : cube root ceil : value up to the", "[bool]), (\"h\", h, [int, float]), (\"response\", response, [str]), (\"nbins\", nbins,", "MIN({}) AS min, APPROXIMATE_PERCENTILE({} USING PARAMETERS percentile = 0.25) AS", "-------- vDataFrame[].describe : Computes the vColumn descriptive statistics. \"\"\" return", "in by]) ) if (by) else \"\" ) order_by_ts =", "equal to 2.\" ) assert len(breaks) == len(labels) + 1", "] expr = \"CASE WHEN \" + \" WHEN \".join(conditions)", ") result = [elem[0] for elem in result] elif self.isnum()", "use to partition the data. bandwidth: float, optional The bandwidth", "0\".format( func.replace(\"{}\", self.alias), self.parent.__genSQL__(), self.alias, ), \"apply_test_feature\", ) category =", "(use_numbers_as_suffix) else '\"{}{}\"'.format( prefix, str(distinct_elements[k]).replace('\"', \"_\") ) ) name =", "iqr : interquartile range kurtosis : kurtosis jb : Jarque-Bera", "pre_comp assert self.isnum() or self.isdate(), ParameterError( \"numh is only available", "str = \"\", max_cardinality: int = 6, h: float =", "\"VERTICAPY_NOT_PRECOMPUTED\": return pre_comp store_usage = executeSQL( \"SELECT ZEROIFNULL(SUM(LENGTH({}::varchar))) FROM {}\".format(", "vColumn. vDataFrame[].label_encode : Encodes the vColumn with Label Encoding. vDataFrame[].mean_encode", "self.parent.__genSQL__(), query ) query_result = executeSQL( query=query, title=\"Computing the descriptive", "def iloc(self, limit: int = 5, offset: int = 0):", "if (isinstance(lower, (float, int))) else \"\" ) upper_when = (", "title=\"Getting the vColumn element.\", method=\"fetchfirstelem\", ) else: return getattr(self, index)", "), \"MIN({}) OVER (PARTITION BY {})\".format( self.alias, \", \".join(by) ),", "input vColumn based on an aggregation. Parameters ---------- by: str,", "of the vColumn 'of'. min : Minimum of the vColumn", "increase the precision but will also increase the time of", "True if len(args) > 0: column = args[0] elif \"column\"", "# ---# def add(self, x: float): \"\"\" --------------------------------------------------------------------------- Adds the", "else: h = (self.max() - self.min()) * 1.01 / nbins", "{} using the MIN and the MAX. MAX = MIN", "is numerical or of type date like. Optimized h will", "(self.category() == \"float\"): trans = ( \"'[' || FLOOR({} /", "- pt_events) * ZEROIFNULL(LN(pt_non_events / NULLIFZERO(pt_events))) END AS iv FROM", "BETWEEN {} AND {} THEN '[{};{}]' \".format( \"{}\", result[i -", "\"VERTICAPY_NOT_PRECOMPUTED\": return pre_comp assert self.isnum() or self.isdate(), ParameterError( \"numh is", "pass self.parent.__add_to_history__( \"[Discretize]: The vColumn {} was discretized.\".format(self.alias) ) return", "lesser than November 1993 the 3rd. 
end_date: str / date,", "# ---# def __getitem__(self, index): if isinstance(index, slice): assert index.step", "!= None), ParameterError( \"At least 'lower' or 'upper' must have", "used to compute the best splits when 'method' is set", "(method != \"cat_stats\") or (numcol), ParameterError( \"The parameter 'numcol' must", "x GROUP BY 1\".format( self.alias, y, y, query, ) query", "and mad). (x - median) / (1.4826 * mad) minmax", "= \"Normalize doesn't work on booleans\".format(self.alias) warnings.warn(warning_message, Warning) elif self.isnum():", "median : median min : minimum mode : most occurent", "places sign : arithmetic sign sin : trigonometric sine sinh", "\"COALESCE({}, DECODE({}, {}, NULL))\".format( \"{}\", by[0], \", \".join( [\"{}, {}\".format(elem[0],", "()) AS pt_non_events, events / NULLIFZERO(SUM(events) OVER ()) AS pt_events", "the vColumn with One-Hot Encoding. vDataFrame[].label_encode : Encodes the vColumn", "vColumn is numerical. See Also -------- vDataFrame[].isbool : Returns True", "if \"count\" in sauv: self.catalog[\"count\"] = int(sauv[\"count\"]) + total self.catalog[\"percent\"]", "float]), ( \"method\", method, [\"auto\", \"smart\", \"same_width\", \"same_freq\", \"topk\"], ),", "prefix, str(distinct_elements[k]).replace('\"', \"_\") ) ) assert not (self.parent.is_colname_in(name)), NameError( f\"A", "---------- ts: str TS (Time Series) vColumn to use to", "[tuple]), ( \"start_date\", start_date, [str, datetime.datetime, datetime.date, int, float], ),", "statistical aggregations: min, max, median, unique... depending on the input", "result = self.aggregate(func=[\"std\", \"avg\"]).transpose().values p_alpha, p_1_alpha = ( -threshold *", "\"int\") # ---# def iv_woe(self, y: str, nbins: int =", "time series rule. \"\"\" return self.apply(func=\"DATE_PART('{}', {})\".format(field, \"{}\")) # ---#", "{}.\".format(self.alias) tail = to_tablesample( \"SELECT {} AS {} FROM {}{}", "start_date, end_date, plot_median, ax=ax, **style_kwds, ) # ---# def rename(self,", "in the final generated SQL code. Note: Dropping a vColumn", ") return self.parent one_hot_encode = get_dummies # ---# def head(self,", "/ / / # \\ / / / # \\_______/", "pie_type: str, optional The type of pie chart. auto :", "else \"\" if index < 0: index += self.parent.shape()[0] query", "# | \\/ / / / # |______ / /", "display. Returns ------- tablesample An object containing the result. For", "the histogram of the vColumn based on an aggregation. Parameters", "of the vDataFrame parent and do not apply it. This", "delattr(self.parent, self.alias) except: self.parent._VERTICAPY_VARIABLES_[\"exclude_columns\"] += [self.alias] if add_history: self.parent.__add_to_history__( \"[Drop]:", "bool True if the vColumn is boolean. See Also --------", "code. Note: Dropping a vColumn can make the vDataFrame \"heavier\"", "---------- val: list List of the different records. For example,", "False otherwise. Returns ------- bool True if the vColumn is", "of points to use to evaluate the approximate density function.", "parameter to pass to the geopandas plot function. For more", "of the ceiling. Returns ------- vDataFrame self.parent See Also --------", "\"numh is only available on type numeric|date\" ) if self.isnum():", "max_cardinality + 1) query = \"WITH vdf_table AS (SELECT *", "This parameter is used for testing purpose. Returns ------- vDataFrame", ": Maximum of the vColumn 'of'. sum : Sum of", "# _ # \\ / _ __|_. 
_ _ |_)", "= \"<\", \"<=\", \"]\", \"]\" else: op1, op2, close_l, close_r", "method: str = \"density\", of: str = \"\", max_cardinality: int", "when method is set to 'smart'. return_enum_trans: bool, optional Returns", ">= 2, ParameterError( \"Parameter 'k' must be greater or equals", "optional vColumn to use to partition the data. bandwidth: float,", "compute the Equal Frequency discretization\" ) total, query, nth_elems =", "verticapy.plot import pie return pie( self, method, of, max_cardinality, h,", "and Mode for the categorical vColumns. bfill : Back Propagation", "str): \"\"\" --------------------------------------------------------------------------- Extracts the regular expression in each record", "\"''\")) if elem[0] != None else \"NULL\", elem[1] if elem[1]", "order_by]) new_column = \"COALESCE({}, LAST_VALUE({} IGNORE NULLS) OVER ({} ORDER", "= self.parent.get_columns(), 0 for column in all_cols: try: if (quote_ident(column)", "\"float\") ] self.parent.__update_catalog__(erase=True, columns=[self.alias]) self.parent.__add_to_history__( \"[Mean Encode]: The vColumn {}", "of the vColumn. \"\"\" return self.iloc(limit=limit) # ---# def hist(", "the input vColumn. \"\"\" check_types( [ (\"breaks\", breaks, [list]), (\"labels\",", "if the vDataFrame has been transformed multiple times, so it's", "\"\"\" check_types([(\"x\", x, [int, float])]) assert x != 0, ValueError(\"Division", "can be the result of using previously the method on", "else \"\" return self.aggregate(func=[prefix + \"{}%\".format(x * 100)]).values[self.alias][ 0 ]", "= self.parent.format_colnames(column) columns += [column] if not (\"cmap\" in kwargs):", "attr, val): self.__dict__[attr] = val # # Methods # #", "and do not apply it. This parameter is very useful", "to compute the aggregation. max_cardinality: int, optional Maximum number of", "query = \"SELECT * FROM {} WHERE {} IS NOT", "\"SELECT {}, {} FROM {} LIMIT 1\".format( avg, stddev, self.parent.__genSQL__()", "self.catalog[\"max\"] = 1 self.parent.__add_to_history__( \"[Normalize]: The vColumn '{}' was normalized", "--------------------------------------------------------------------------- Aggregates the vColumn using 'var' (Variance). Returns ------- float", "- limit) title = \"Reads {}.\".format(self.alias) tail = to_tablesample( \"SELECT", "of the vColumn. vDataFrame[].tail : Returns the tail of the", "method, [str]), (\"of\", of, [str]), (\"max_cardinality\", max_cardinality, [int, float]), (\"nbins\",", "optional The integer k of the 'topk' method. new_category: str,", "/ QUARTER / SECOND / TIME ZONE / TIMEZONE_HOUR /", "schema = \"public\" tmp_view_name = gen_tmp_name(schema=schema, name=\"view\") tmp_model_name = gen_tmp_name(schema=schema,", "1) else \"LIMIT {}\".format(k) dropna = \" WHERE {} IS", "use to partition the data. method: str, optional The method", "self.parent.__genSQL__() ) p_alpha, p_1_alpha = executeSQL( query=query, title=\"Computing the quantiles", "cos : trigonometric cosine cosh : hyperbolic cosine cot :", "aggregate the data. count : Number of elements. density :", "\"Parameter 'response' can not be empty in case of discretization", "\\ \\ \\'/ \\ \\ \\____ \\ \\ \\/\\ \\", "the time of the learning and scoring phases. 
xlim: tuple,", "DESC {}\".format( bin_spatial_to_str(self.category(), self.alias), self.alias, self.parent.shape()[0], self.parent.__genSQL__(), dropna, self.alias, topk,", "bin_spatial_to_str(self.category(), self.alias), self.alias, kwargs[\"agg\"], self.parent.__genSQL__(), self.alias, ) query_result = executeSQL(", "AS _verticapy_row_nb_ FROM {} WHERE {} IS NOT NULL) VERTICAPY_SUBTABLE", "vDataFrame[].extract : Extracts the regular expression in each record of", "= False, n: int = 1): \"\"\" --------------------------------------------------------------------------- Returns the", "bar(self, method, of, max_cardinality, nbins, h, ax=ax, **style_kwds) # ---#", "of the mean skewness : skewness sum : sum std", "DESC LIMIT {}\".format( self.parent.__genSQL__(), self.alias, self.alias, n ) title =", "vDataFrame. Parameters ---------- name: str Name of the copy. Returns", "Also -------- vDataFrame.boxplot : Draws the Box Plot of the", "sauv: if \"top\" in elem: if \"percent\" in elem: self.catalog[elem]", "is boolean, False otherwise. Returns ------- bool True if the", "element). 0ifnull : 0 when the vColumn is null, 1", "List of the different aggregation. aad : average absolute deviation", "[(\"{}\", self.ctype(), self.category())] self.transformations += [(func, ctype, category)] self.parent.__update_catalog__(erase=True, columns=[self.alias])", "if the vColumn is boolean. vDataFrame[].isdate : Returns True if", "x, [int, float])]) return self.apply(func=\"{} * ({})\".format(\"{}\", x)) # ---#", "only have one parent. catalog: dict, optional Catalog where each", "\"<=\", \"[\" elif idx == 0: op1, close_l = \"<\",", "Start of the slicing. step: int Size of the slicing.", "mad(self): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'mad' (median absolute", "---# def __len__(self): return int(self.count()) # ---# def __nonzero__(self): return", "jb : Jarque-Bera index mad : median absolute deviation max", ") return parent # ---# def drop_outliers( self, threshold: float", "natural logarithm log : logarithm log10 : base 10 logarithm", "self.aggregate([\"avg\"]).values[self.alias][0] mean = avg # ---# def bar( self, method:", "= \"COALESCE({}, '{}')\".format(\"{}\", val) elif expr: new_column = \"COALESCE({}, {})\".format(\"{}\",", "{} IS NOT NULL \".format(self.alias) if (dropna) else \" \"", "self.parent.__genSQL__(), where, self.alias, n ), title=\"Computing the mode.\", method=\"fetchall\", )", "std). (x - avg) / std robust_zscore : Normalization using", "index.start if not (isinstance(index_start, int)): index_start = 0 if index_start", "executeSQL( \"SELECT {}, AVG({}), STDDEV({}) FROM {} GROUP BY {}\".format(", "with a user-specified rule. Parameters ---------- val: int/float/str, optional Value", "Returns the vColumn expected store usage (unit: b). Returns -------", "\\/ # _ # \\ / _ __|_. _ _", "Computes the vDataFrame input aggregations. 
\"\"\" return self.aggregate([\"sum\"]).values[self.alias][0] # ---#", "= 0 THEN 0 ELSE (pt_non_events - pt_events) * ZEROIFNULL(LN(pt_non_events", "method, of, max_cardinality, h, donut, rose, ax=None, **style_kwds, ) #", "[int])]) self.parent.are_namecols_in(y) y = self.parent.format_colnames(y) assert self.parent[y].nunique() == 2, TypeError(", "where = \" WHERE {} IS NOT NULL \".format(self.alias) if", "vColumn can make the vDataFrame \"heavier\" if it is used", "{} LIMIT 10\".format( self.parent.__genSQL__(force_columns=force_columns) ), print_time_sql=False, ) self.parent._VERTICAPY_VARIABLES_[\"columns\"].remove(self.alias) delattr(self.parent, self.alias)", "\"\"\" --------------------------------------------------------------------------- Fills the vColumns outliers using the input method.", ") if by: self.parent.are_namecols_in(by) by = self.parent.format_colnames(by) columns = [self.alias,", "Drops the input vColumns from the vDataFrame. \"\"\" check_types([(\"add_history\", add_history,", "str New type. Returns ------- vDataFrame self.parent See Also --------", "float]), (\"h\", h, [int, float]), (\"cat_priority\", cat_priority, [list]), ] )", "---# def describe( self, method: str = \"auto\", max_cardinality: int", "the vColumn using 'var' (Variance). Returns ------- float var See", "index_stop < 0: index_stop += self.parent.shape()[0] limit = index_stop -", "------- float skewness See Also -------- vDataFrame.aggregate : Computes the", "--------------------------------------------------------------------------- Returns the k most occurent elements and their distributions", "/ / # \\ / / / # \\_______/ /", "return self.aggregate([\"skewness\"]).values[self.alias][0] skew = skewness # ---# def slice(self, length:", "See Also -------- vDataFrame.analytic : Adds a new vColumn to", "copy_trans] raise QueryError(\"{}\\nAn Error happened during the filling.\".format(e)) if total", "/ std robust_zscore : Normalization using the Robust Z-Score (median", "than the threshold it will be considered as an outlier.", "x: float): \"\"\" --------------------------------------------------------------------------- Multiplies the vColumn by the input", "\"categorical\", \"cat_stats\"]), (\"max_cardinality\", max_cardinality, [int, float]), (\"numcol\", numcol, [str]), ]", "Also -------- vDataFrame.astype : Converts the vColumns to the input", "] ) if of: self.parent.are_namecols_in(of) of = self.parent.format_colnames(of) from verticapy.plot", ") ) return self.parent except Exception as e: raise QueryError(", "\"\", start_date: Union[str, datetime.datetime, datetime.date] = \"\", end_date: Union[str, datetime.datetime,", "for item in result], \"percent\": [float(round(item[2], 3)) for item in", "not (method == \"categorical\")) or ( method == \"is_numeric\" ):", "different aggregation. aad : average absolute deviation approx_unique : approximative", "the vColumn with Label Encoding. vDataFrame[].mean_encode : Encodes the vColumn", "\"\"\" --------------------------------------------------------------------------- Returns True if the vColumn is numerical, False", "optimized number of nbins will be computed. 
h: float, optional", "Maximum number of the vColumn distinct elements to be used", "mad), \"float\", \"float\", ) ] else: warning_message = \"Can not", "self.parent.are_namecols_in(column) column = self.parent.format_colnames(column) columns += [column] if not (\"cmap\"", "---# def distinct(self, **kwargs): \"\"\" --------------------------------------------------------------------------- Returns the distinct categories", "types to varchar. same_freq : Computes bins with the same", "< 0: offset = max(0, self.parent.shape()[0] - limit) title =", "{}) OVER () FROM {} LIMIT 1\".format( alpha, self.alias, alpha,", "self.parent[numcol].category() in (\"float\", \"int\"), TypeError( \"The column 'numcol' must be", "import ts_plot return ts_plot( self, ts, by, start_date, end_date, area,", "the data (Z-Score), if the absolute value of the record", "\\/_/ \\/_/ \\/_/ \\/_/ # # # ---# class vColumn(str_sql):", "is greater than November 1993 the 3rd. plot_median: bool, optional", "solve this issue.\" ) self.add_copy(new_name) parent = self.drop(add_history=False) parent.__add_to_history__( \"[Rename]:", "be converted to {}\".format( e, self.alias, dtype ) ) #", "Usage of the vColumn {}.\".format(self.alias), method=\"fetchfirstelem\", ) self.parent.__update_catalog__( {\"index\": [\"store_usage\"],", "by: str = \"\", bandwidth: float = 1.0, kernel: str", "vColumn_max = ( result[0], result[3], result[4], result[6], result[7], ) elif", "Fills the outliers in the vColumn. vDataFrame.outliers : Adds a", "\", \".join(by) ), ) else: avg, stddev = ( \"AVG({})", "value to a specified number of decimal places sign :", "APPROXIMATE_PERCENTILE ({3}{4} USING PARAMETERS percentile = 0.9) AS 'approx_90%', MAX({3}{4})", "picked or computed) nbins: int, optional Number of nbins. If", "or numerical. by: str, optional vColumn to use to partition", "next whole number ln : natural logarithm log : logarithm", "in range(len(breaks) - 1): first_elem, second_elem = breaks[idx], breaks[idx +", "# ---# def nsmallest(self, n: int = 10): \"\"\" ---------------------------------------------------------------------------", "categories will be filtered. ax: Matplotlib axes object, optional The", "to aggregate the data. count : Number of elements. density", "above. The idea is simple: instead of moving # data", "not normalize {} using a Z-Score - The Standard Deviation", "method == \"zscore\": self.catalog[\"mean\"] = 0 self.catalog[\"std\"] = 1 elif", "\"mode\" total = self.count() if (method == \"mode\") and (val", "h, [int, float]), (\"cat_priority\", cat_priority, [list]), ] ) if by:", "in zscore|minmax\" warnings.warn(warning_message, Warning) return self mad, med = self.aggregate([\"mad\",", "\"float\", \"float\", ) ] elif method == \"robust_zscore\": if n", "unit: str, optional Slice size unit. For example, it can", "If the function has two arguments (example, power or mod),", "+ total) / self.parent.shape()[0] ) except: pass total = int(total)", "the input vColumns using the input method. 
Parameters ---------- method:", "matches in each of the vColumn records by an input", "== \"same_freq\") or ( self.isnum() and method == \"smart\" ):", "isinstance(by, str): by = [by] check_types( [ (\"method\", method, [\"zscore\",", "if self.category() in [\"date\", \"float\"]: warning_message = ( \"label_encode is", "percentile = 0.9) AS 'approx_90%', MAX({3}{4}) AS max FROM vdf_table\"\"\".format(", "= self.count() if (method == \"mode\") and (val == None):", "else: if verticapy.options[\"print_info\"]: print(\"Nothing was filled.\") self.transformations = [elem for", "_ _| _ /~\\ _ |. # |_)\\/ |_)(_|(_|| \\_/|_|(_|||", "will filter the data when 'ts' is lesser than November", "Bar Chart of vColumn based on an aggregation. \"\"\" check_types(", "/ / /\\ ___\\ /\\ __ \\ /\\ \\ /\\", "in kwargs): from verticapy.plot import gen_colors kwargs[\"color\"] = gen_colors()[0] if", "NULLIFZERO(SUM(events) OVER ()) AS pt_events FROM ({}) x\".format( self.alias, query,", "\"fd\", \"auto\"])] ) method = method.lower() if method == \"auto\":", "= \"(SELECT {} FROM {}{} OFFSET {}{}) VERTICAPY_SUBTABLE\".format( self.alias, self.parent.__genSQL__(),", "of elements. density : Percentage of the distribution. mean :", "data. Parameters ---------- new_name: str The new vColumn alias. Returns", "as e: raise QueryError( \"{}\\nError when applying the func 'x", "NULLIFZERO(SUM(non_events) OVER ()) AS pt_non_events, events / NULLIFZERO(SUM(events) OVER ())", "of the above. The idea is simple: instead of moving", "the vDataFrame expected store usage. \"\"\" pre_comp = self.parent.__get_catalog_value__(self.alias, \"store_usage\")", ": Draws the Bar Chart of vColumn based on an", "numcol, self.alias ) values = to_tablesample(query, title=title).values elif ( ((distinct_count", "text_info ) ) return self.parent # ---# def mad(self): \"\"\"", "IS NOT NULL GROUP BY 1 ORDER BY 2 DESC", "defined. numerical : Uses popular numerical aggregations during the computation.", "nth_elems = nb, [], [] while total < int(float(count /", "element.\", method=\"fetchfirstelem\", ) else: return getattr(self, index) # ---# def", "must be one of the following: CENTURY / DAY /", "self.ctype(), self.category())] self.transformations += final_transformation sauv = {} for elem", "int(math.floor(math.log(count, 2) + 2)), 1e-99, ) fd = max(2.0 *", "alpha], [self.alias]) .transpose() .values[self.alias] ) self.parent.filter( \"({} BETWEEN {} AND", "IS NOT NULL\".format(self.alias)) return self.parent # ---# def fill_outliers( self,", "NULLIFZERO(pt_events))) END AS woe, CASE WHEN non_events = 0 OR", "--------------------------------------------------------------------------- Encodes the vColumn using a user-defined encoding. Parameters ----------", "= \"SELECT {}{} FROM {}{} OFFSET {} LIMIT 1\".format( self.alias,", "\"rose\"]), ] ) donut = True if pie_type == \"donut\"", "each record of the vColumn. The vColumn will be transformed.", "str, optional Prefix of the dummies. prefix_sep: str, optional Prefix", "and offers beautiful graphical options. # # VerticaPy aims to", "using. Returns ------- tablesample An object containing the result. For", "Converts the vColumn to the input type. Parameters ---------- dtype:", "numeric|date\" ) if self.isnum(): result = ( self.parent.describe( method=\"numerical\", columns=[self.alias],", "self.apply( func=\"TIME_SLICE({}, {}, '{}', '{}')\".format( \"{}\", length, unit.upper(), start_or_end )", "otherwise. 
Returns ------- bool True if the vColumn category is", "int(float(count / int(nbins))) * int(nbins): nth_elems += [str(total)] total +=", "( \"({} - {}) / {}({})\".format( \"{}\", avg, \"NULLIFZERO\" if", "\"\"\" --------------------------------------------------------------------------- Drops the vColumn from the vDataFrame. Dropping a", "AS {}, {} AS ord, {}::int AS {} FROM {}\".format(", "lesser than the lower bound to the lower bound itself", "vDataFrame.aggregate : Computes the vDataFrame input aggregations. \"\"\" return self.aggregate([\"variance\"]).values[self.alias][0]", "Also -------- vDataFrame[].fill_outliers : Fills the vColumn outliers using the", "[], self.distinct() if len(cat) == 1: lp, rp = \"(\",", "the parameter is empty or invalid. max_cardinality: int, optional Maximum", "self.isbool(): warning_message = \"Normalize doesn't work on booleans\".format(self.alias) warnings.warn(warning_message, Warning)", ": Extracts the regular expression in each record of the", "--------------------------------------------------------------------------- Returns the category of the vColumn. The category will", "the optimal h.\", method=\"fetchrow\", ) count, vColumn_min, vColumn_025, vColumn_075, vColumn_max", "partition the TS. start_date: str / date, optional Input Start", "for elem in copy_trans] raise QueryError(\"{}\\nAn Error happened during the", "the statics of {} partitioned by {}.\".format( numcol, self.alias )", "max_cardinality, [int, float]), (\"h\", h, [int, float]), (\"cat_priority\", cat_priority, [list]),", "and max). (x - min) / (max - min) by:", "self.parent.shape()[0] self.catalog[\"percent\"] = 100 self.parent.__add_to_history__( \"[Label Encoding]: Label Encoding was", "str = \"density\", of: str = \"\", max_cardinality: int =", "else: final_transformation = [ ( \"({} - {}) / ({})\".format(\"{}\",", "(\"of\", of, [str]), (\"max_cardinality\", max_cardinality, [int, float]), (\"h\", h, [int,", "self.transformations = [elem for elem in copy_trans] raise QueryError(\"{}\\nAn Error", "already computed aggregations to gain in performance. The catalog will", "result[i][2] = None avg = \"DECODE({}, {}, NULL)\".format( by[0], \",", "WHERE {} > {})\".format( self.parent.__genSQL__(), self.alias, self.alias, p_alpha, self.alias, self.alias,", "Creates a copy of the vColumn. \"\"\" check_types([(\"new_name\", new_name, [str])])", "apply_fun(self, func: str, x: float = 2): \"\"\" --------------------------------------------------------------------------- Applies", "= \"WITH vdf_table AS (SELECT * FROM {}) {}\".format( self.parent.__genSQL__(),", "(or approximate quantile). See Also -------- vDataFrame.aggregate : Computes the", "LIMIT {} OFFSET {}\".format( bin_spatial_to_str(self.category(), self.alias), self.alias, self.parent.__genSQL__(), self.parent.__get_last_order_by__(), limit,", "of type date like. Optimized h will be computed if", "FROM {}{} OFFSET {}{}) VERTICAPY_SUBTABLE\".format( self.alias, self.parent.__genSQL__(), self.parent.__get_last_order_by__(), index_start, limit,", "{} THEN {} \".format(\"{}\", lower, lower) if (isinstance(lower, (float, int)))", "aggregation. Parameters ---------- by: str, optional vColumn to use to", "method to use to discretize the vColumn. auto : Uses", "to the vColumn. \"\"\" check_types( [ ( \"func\", func, [", "Looks if some specific records are in the vColumn and", "or of type date like. 
Optimized h will be computed", "FROM (SELECT {}, COUNT(*) AS _verticapy_cnt_ FROM {}{}GROUP BY {}", "# # VerticaPy aims to do all of the above.", "on an aggregation. Parameters ---------- method: str, optional The method", "Returns the transformation instead of the vDataFrame parent and do", "mean encoding of a response. \"\"\" if self.category() in [\"date\",", "name = quote_ident(name.replace('\"', \"_\")) assert name.replace('\"', \"\"), EmptyParameter( \"The parameter", "isinstance(index, int): cast = \"::float\" if self.category() == \"float\" else", "bool, optimal If set to True, the method will return", "computed) ax: Matplotlib axes object, optional The axes to plot", "# _____________ / / # \\ / / / #", ".transpose() .values[self.alias] ) self.parent.filter( \"({} BETWEEN {} AND {})\".format(self.alias, p_alpha,", "elif method == \"minmax\": self.catalog[elem] = (sauv[elem] - sauv[\"min\"]) /", "= 6, h: float = 0, pie_type: str = \"auto\",", "False, ax=None, **style_kwds, ): \"\"\" --------------------------------------------------------------------------- Draws the Time Series", "[str, datetime.datetime, datetime.date, int, float], ), ( \"end_date\", end_date, [str,", "'same_width' for numerical vColumns, cast the other types to varchar.", "optional Value to use to impute the vColumn. method: dict,", "self.catalog[elem] = sauv[elem] return self.parent # ---# def geo_plot(self, *args,", "the Robust Z-Score (median and mad). (x - median) /", "in (\"None\", None)) else \" WHERE {} = '{}'\".format( bin_spatial_to_str(self.category(),", "# ---# def mad(self): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using", "utilities.tablesample. See Also -------- vDataFrame[].nlargest : Returns the n largest", "\"{}({})\".format(func.upper(), \"{}\") else: expr = \"{}({}, {})\".format(func.upper(), \"{}\", x) return", ".replace(\"'\", \"_\") ) expr = \"DECODE({}, '{}', 1, 0)\".format( \"{}\",", "returns the new vDataFrame of the search. Parameters ---------- val:", ": Average. median : median. mode : mode (most occurent", "model.fit(tmp_view_name, [self.alias], response) query = [ \"(SELECT READ_TREE(USING PARAMETERS model_name", "Aggregates the vColumn using 'kurtosis'. Returns ------- float kurtosis See", "at risk dtype : vColumn type iqr : interquartile range", "AND {} THEN '[{};{}]' \".format( \"{}\", result[i - 1], result[i],", "\"top\") if pre_comp != \"VERTICAPY_NOT_PRECOMPUTED\": if not (dropna) and (pre_comp", "executeSQL( \"SELECT {}, {} FROM {} LIMIT 1\".format( cmax, cmin,", "self.min() table = \"(SELECT DATEDIFF('second', '{}'::timestamp, {}) AS {} FROM", "+= [sum(result[\"non_events\"])] result.values[\"events\"] += [sum(result[\"events\"])] result.values[\"pt_non_events\"] += [\"\"] result.values[\"pt_events\"] +=", ": product range : difference between the max and the", "result[0][0] if not (dropna): n = \"\" if (n ==", "to order the data. The vColumn type must be date", "apply the function: x -> x^2 + 2 use \"POWER({},", "using 'sum'. Returns ------- float sum See Also -------- vDataFrame.aggregate", "null !\".format( self.alias ) warnings.warn(warning_message, Warning) return self elif method", "---# def abs(self): \"\"\" --------------------------------------------------------------------------- Applies the absolute value function", "of the first element (Constant Interpolation). mean : Average. 
median", "of: self.parent.are_namecols_in(of) of = self.parent.format_colnames(of) from verticapy.plot import bar return", "express or implied. # See the License for the specific", "vColumn_075, vColumn_max = ( result[0], result[3], result[4], result[6], result[7], )", "FROM {}) {}\".format( self.parent.__genSQL__(), query ) query_result = executeSQL( query=query,", "---# def sem(self): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'sem'", "{}, \"regr_avgy\": {}, \"regr_count\": {}, \"regr_intercept\": {}, \"regr_r2\": {}, \"regr_slope\":", "the numerical and Mode for the categorical vColumns. bfill :", "forbidden !\") return self.apply(func=\"{} / ({})\".format(\"{}\", x)) # ---# def", "series rule. \"\"\" return self.apply(func=\"DATE_PART('{}', {})\".format(field, \"{}\")) # ---# def", "if h > 0.01: h = round(h, 2) elif h", "self.alias) query = \"SELECT {} AS {}, {} AS ord,", "--------------------------------------------------------------------------- Draws the range plot of the vColumn. The aggregations", ": Computes the vDataFrame input aggregations. \"\"\" return self.aggregate([\"sem\"]).values[self.alias][0] #", "transformations: list = [], parent=None, catalog: dict = {} ):", "filled.\".format( total, self.alias, conj, ) ) else: if verticapy.options[\"print_info\"]: print(\"Nothing", "PARAMETERS percentile = 0.75) AS 'approx_75%', APPROXIMATE_PERCENTILE ({3}{4} USING PARAMETERS", "------- float/str median See Also -------- vDataFrame.aggregate : Computes the", ": Normalization using the Z-Score (avg and std). (x -", "the vColumn records. The vColumn will be transformed. Parameters ----------", ") ) else: for k in range(max_floor): self.transformations += [(\"{}\",", "= (\"{} || ''\", \"varchar\", \"text\") if (self.isnum() and method", ") warnings.warn(warning_message, Warning) return self.parent if isinstance(val, str): val =", "{} IS NOT NULL ORDER BY {} ASC LIMIT {}\".format(", "\"\"\" check_types( [ (\"breaks\", breaks, [list]), (\"labels\", labels, [list]), (\"include_lowest\",", "\"auto\", max_cardinality: int = 6, numcol: str = \"\" ):", "the vDataFrame input aggregations. \"\"\" return self.aggregate([\"aad\"]).values[self.alias][0] # ---# def", "sine sinh : hyperbolic sine sqrt : arithmetic square root", "breaks: list, labels: list = [], include_lowest: bool = True,", "gen_colors from matplotlib.lines import Line2D colors = gen_colors() if not", "ax=None, **style_kwds, ): \"\"\" --------------------------------------------------------------------------- Draws the Time Series of", "__ ______ ______ __ __ __ __ __ __ __", "idx == 0: op1, close_l = \"<\", \"]\" if labels:", "the parameter 'x' will represent the number of seconds, otherwise", "str): pie_type = pie_type.lower() check_types( [ (\"method\", method, [str]), (\"of\",", "# ---# def add_copy(self, name: str): \"\"\" --------------------------------------------------------------------------- Adds a", "\"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'kurtosis'. 
Returns ------- float", "# ---# def mean_encode(self, response: str): \"\"\" --------------------------------------------------------------------------- Encodes the", "applying the func 'x -> {}' to '{}'\".format( e, func.replace(\"{}\",", "OR events = 0 THEN 0 ELSE (pt_non_events - pt_events)", "title=\"Computing the different aggregations.\", method=\"fetchall\", ) for idx, elem in", "Normalizes the input vColumns using the input method. Parameters ----------", "USING PARAMETERS percentile = 0.25) AS Q1, APPROXIMATE_PERCENTILE({} USING PARAMETERS", "({})\".format(\"{}\", x)) # ---# def drop(self, add_history: bool = True):", "**kwargs Any optional parameter to pass to the geopandas plot", "MONTH / QUARTER / SECOND / TIME ZONE / TIMEZONE_HOUR", "self.nunique(True) > 6) else \"mode\" total = self.count() if (method", "and 1 (1 meaning global outlier). \"\"\" if isinstance(method, str):", "higher than the upper bound to the upper bound itself.", "float skewness See Also -------- vDataFrame.aggregate : Computes the vDataFrame", "- 1] (n being the vColumn cardinality). Returns ------- vDataFrame", "SQL code generation will be slower if the vDataFrame has", "ParameterError( \"The parameter 'numcol' must be a vDataFrame column if", "to_replace: str, value: str = \"\"): \"\"\" --------------------------------------------------------------------------- Replaces the", "str vColumn category. See Also -------- vDataFrame[].ctype : Returns the", "NULL\".format(self.alias)) return self.parent # ---# def fill_outliers( self, method: str", "{} AS {}, {} AS verticapy_agg FROM {} WHERE {}", "float]), (\"nbins\", nbins, [int, float]), ] ) if of: self.parent.are_namecols_in(of)", "(sauv[elem] - sauv[\"mean\"]) / sauv[ \"std\" ] elif method ==", "vDataFrame. \"\"\" check_types([(\"add_history\", add_history, [bool])]) try: parent = self.parent force_columns", "self.catalog[\"percent\"] = 100 self.parent.__add_to_history__( \"[Label Encoding]: Label Encoding was applied", "of the vColumn {} named {} was added to the", "== len(labels) + 1 or not (labels), ParameterError( \"Length of", "parent=self.parent, transformations=transformations, catalog={ \"min\": 0, \"max\": 1, \"count\": self.parent.shape()[0], \"percent\":", "def mad(self): \"\"\" --------------------------------------------------------------------------- Aggregates the vColumn using 'mad' (median", "(float, int))) else \"\" ) func = \"(CASE {}{}ELSE {}", "= {self.alias: val} return self.parent.isin(val) # ---# def isnum(self): \"\"\"", ") ] elif method == \"robust_zscore\": if n > 0:", "column if the method is 'cat_stats'\" ) distinct_count, is_numeric, is_date", "\".join(by) ), ) else: avg, stddev = ( \"AVG({}) OVER", ") custom_lines += [ Line2D( [0], [0], color=updated_dict(param, style_kwds, idx)[\"color\"],", "self.parent.are_namecols_in([elem for elem in order_by] + by) by = self.parent.format_colnames(by)", "response column must be numerical to use a mean encoding\"", "{column} AND {column} {op2} '{second_elem}' THEN '{label}'\" ] expr =", "str_contains(self, pat: str): \"\"\" --------------------------------------------------------------------------- Verifies if the regular expression", "(), PERCENTILE_CONT(1 - {}) WITHIN GROUP (ORDER BY {}) OVER", "/ # ______ / / # \\ / / /", "1.0, kernel: str = \"gaussian\", nbins: int = 200, xlim:", "if \"agg\" not in kwargs: query = \"SELECT {} AS", "transforms the vColumn using a time series rule. 
Parameters ----------", "def add_copy(self, name: str): \"\"\" --------------------------------------------------------------------------- Adds a copy vColumn", "bound to the upper bound itself. Parameters ---------- lower: float,", "method = method.lower() if self.isnum() and method == \"smart\": schema", "not (result) else result[0][0] if not (dropna): n = \"\"", "variance Other aggregations could work if it is part of", "vDataFrame. \"\"\" if isinstance(val, str) or not (isinstance(val, Iterable)): val", "bijection from the different categories to [0, n - 1]", "(No h will be picked or computed) h: float, optional", "by if method in [\"ffill\", \"pad\", \"bfill\", \"backfill\"]: all_partition +=", "0.05, ): \"\"\" --------------------------------------------------------------------------- Fills the vColumns outliers using the", "{}) / {}({})\".format( self.alias, avg, \"NULLIFZERO\" if (nullifzero) else \"\",", "val: list List of the different records. For example, to", "FROM (SELECT {} AS {}, {} AS verticapy_agg FROM {}", "+ 1) query = \"WITH vdf_table AS (SELECT * FROM", "\"end_date\", end_date, [str, datetime.datetime, datetime.date, int, float], ), (\"plot_median\", plot_median,", "Warning) return self elif method == \"minmax\": if n ==", "STDDEV({}) FROM {} GROUP BY {}\".format( by[0], self.alias, self.alias, self.parent.__genSQL__(),", "represents Q1. approx: bool, optional If set to True, the", "if method.lower() == \"sturges\": best_h = sturges elif method.lower() in", "self.transformations += [(expr, \"int\", \"int\")] self.parent.__update_catalog__(erase=True, columns=[self.alias]) self.catalog[\"count\"] = self.parent.shape()[0]", "(self.isnum() and self.nunique(True) > 6) else \"mode\" total = self.count()", "IS NULL\".format(self.alias) if (category in (\"None\", None)) else \" WHERE", ": Returns the head of the vColumn. \"\"\" return self.iloc(limit=limit,", "xmin, xmax = xlim custom_lines = [] columns = self.parent[by].distinct()", "!= \"numerical\")) or not (is_numeric) or (method == \"categorical\") ):", "2 in case of discretization using the method 'topk'\" )", "the same number of elements. same_width : Computes regular width", "i in range(1, n): trans += \"WHEN {} BETWEEN {}", "index, self.alias: result}) for elem in values: for i in", "): \"\"\" --------------------------------------------------------------------------- Fills the vColumns outliers using the input", ") method = method.lower() self.parent.are_namecols_in(by) by = self.parent.format_colnames(by) nullifzero, n", "getattr(self, index) # ---# def __len__(self): return int(self.count()) # ---#", "their distributions as percents. Parameters ---------- k: int, optional Number", "1] ... ELSE argv[n] END Returns ------- vDataFrame self.parent See", "AS {}, COUNT(*) AS _verticapy_cnt_, 100 * COUNT(*) / {}", "consider when drawing the box plot. The other categories will", "\\____ \\ \\ \\_\\ \\ \\ \\ \\-./\\ \\ \\", "bool, optional Returns the transformation instead of the vDataFrame parent", "y, y, self.parent.__genSQL__(), ) query = \"SELECT {}, MIN(ord) AS", "the 'alpha' parameter. alpha: float, optional Number representing the outliers", "# ---# def head(self, limit: int = 5): \"\"\" ---------------------------------------------------------------------------", "FROM {} WHERE {} IS NOT NULL GROUP BY 1)", "# \\ / # \\ / # \\/ # _", "AS (SELECT * FROM {}) {}\".format( self.parent.__genSQL__(), \" UNION ALL", "be dropped. 
[Overlapping text shingles from VerticaPy's vColumn class (vcolumn.py, Apache-2.0 header visible in the fragments). The recoverable pieces are docstring and code excerpts for the column-level API: aggregations (min, sum, var, aad, quantile, nunique, topk, nlargest), descriptive statistics (describe, numh, memory_usage, store_usage, iv_woe), encoding and binning (discretize, cut, get_dummies, label_encode, mean_encode, decode, apply, apply_fun), missing-value and outlier handling (fillna, fill_outliers, drop_outliers, clip, normalize), string and date helpers (str_count, str_extract, str_replace, str_slice, date_part, slice), and plotting (bar, hist, pie, donut, spider, plot, range plots, density, geo_plot). The full source text is not recoverable from these fragments.]
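The surviving fragments are enough to show how this column-level API is meant to be called. Below is a minimal usage sketch, not a reconstruction: the relation name "public.titanic" and the column name "age" are illustrative assumptions, an already-configured Vertica connection is assumed, and only method names and parameters that appear in the fragments above are used.

# Minimal sketch; illustrative names, assumes an existing Vertica connection.
from verticapy import vDataFrame

vdf = vDataFrame("public.titanic")                   # hypothetical relation
vdf["age"].describe(method="numerical")              # descriptive statistics
vdf["age"].fillna(method="mean")                     # impute missing values
vdf["age"].normalize(method="zscore")                # or "robust_zscore", "minmax"
vdf["age"].discretize(method="same_freq", nbins=5)   # equal-frequency binning
vdf["age"].drop_outliers(threshold=4.0)              # drop records far from the mean

As the fragments themselves put it, "instead of moving data around", each call appends a SQL transformation to the column on the parent vDataFrame, so the work stays in the database.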
[ "from datatables import ColumnDT, DataTables from .. import auth from", ": {id}\" @book.route('/post/', methods=['GET', 'POST']) def post_book(): \"\"\" post new", "= int(book[-1].id) + 1 print(f\"id is : {id}\") form =", "be displayed. from flask import render_template, redirect, request, url_for, flash,jsonify,current_app", "from flask import render_template, redirect, request, url_for, flash,jsonify,current_app from flask_login", "# GET parameters params = request.args.to_dict() # instantiating a DataTable", "col name # :param order: asc or desc # :return:", "auth from .. import db from .forms import EditBookForm, HackmdMeta", "form.removed.data book.keepsite = form.keepsite.data db.session.add(book) db.session.commit() flash('Your book data has", "db from .forms import EditBookForm, HackmdMeta # from booktags.db.basemodels import", "%s\\nParameters: %s\\nDuration: %fs\\nContext: %s\\n' % (query.statement, query.parameters, query.duration, query.context)) return", "- explicitly cast date to string, so string searching the", "import login_user, logout_user, login_required, current_user from . import book from", "-*- \"\"\" example.py ~~~~~~~~~ A simple command line application to", "# ColumnDT(BookMain.reprint), # ColumnDT(BookMain.removed), # ColumnDT(BookMain.keepsite) ] # defining the", "import EditBookForm, HackmdMeta # from booktags.db.basemodels import Book from booktags.flaskapp.model.models", "form.validate_on_submit(): book.id = form.id.data book.isbn = form.isbn.data book.title_short = form.title_short.data", "form.title_short.data book.title = form.title.data book.catalogue = form.catalogue.data book.cutter = form.cutter.data", "= book.copy_info form.get_link.data = book.get_link form.note.data = book.note form.reprint.data =", "form.title.data = book.title form.catalogue.data = book.catalogue form.cutter.data = book.cutter form.pub_year.data", "form.catalogue.data = book.catalogue form.cutter.data = book.cutter form.pub_year.data = book.pub_year form.copy_info.data", "%fs\\nContext: %s\\n' % (query.statement, query.parameters, query.duration, query.context)) return response @book.route('/',", "data has been updated.', 'success') return redirect(url_for('book.index')) form.id.data = book.id", "import auth from .. 
import db from .forms import EditBookForm,", "# \"\"\" # # :param field: col name # :param", "response @book.route('/', methods=['GET', 'POST']) def index(): # books=BookMain.get_all_book() query =", "in the table columns = [ # ColumnDT(cast(BookMain.id, db.Integer)), ColumnDT(BookMain.id),", "# ColumnDT(BookMain.removed), # ColumnDT(BookMain.keepsite) ] # defining the initial query", "book.removed = form.removed.data book.keepsite = form.keepsite.data db.session.add(book) db.session.commit() flash('Your book", "is a relative path and # the current directory is", "db.session.commit() flash('Your book data has been updated.', 'success') return redirect(url_for('book.index'))", "book.pub_year form.copy_info.data = book.copy_info form.get_link.data = book.get_link form.note.data = book.note", "form.catalogue.data book.cutter = form.cutter.data book.pub_year = form.pub_year.data book.copy_info = form.copy_info.data", "form.keepsite.data = book.keepsite return render_template('book/edit_book.html', form=form) @book.route('/del/<int:id>', methods=['GET', 'POST']) def", "ColumnDT(BookMain.title_short), ColumnDT(BookMain.title), ColumnDT(BookMain.catalogue), ColumnDT(BookMain.cutter), ColumnDT(BookMain.pub_year), ColumnDT(BookMain.copy_info) # ColumnDT(BookMain.get_link), # ColumnDT(BookMain.note),", "instantiating a DataTable for the query and table needed rowTable", "equal to how it is presented # in the table", "form.pub_year.data = book.pub_year form.copy_info.data = book.copy_info form.get_link.data = book.get_link form.note.data", "book data :param id: :return: \"\"\" form = EditBookForm() book", "\"\"\" from booktags.vendor.hackmd_meta import get_hackmdmeta form = HackmdMeta() if form.validate_on_submit():", "% (query.statement, query.parameters, query.duration, query.context)) return response @book.route('/', methods=['GET', 'POST'])", ":return: renew query # \"\"\" # books = BookMain.get_all_book() #", "= query.order_by(cast(BookMain.id, db.Integer)).paginate( page, per_page=current_app.config['PROJECT_BOOKS_PER_PAGE'], error_out=False) books = pagination.items return", "users with DataTables <= 1.10.x.\"\"\" return render_template('book/list_book.html') @book.route('/data', methods=['GET', 'POST'])", "return jsonify(rowTable.output_result()) @book.route('/get/<int:id>', methods=['GET', 'POST']) def get_book(): return f\"Hello book", "type=int) pagination = query.order_by(cast(BookMain.id, db.Integer)).paginate( page, per_page=current_app.config['PROJECT_BOOKS_PER_PAGE'], error_out=False) books =", "= book.pub_year form.copy_info.data = book.copy_info form.get_link.data = book.get_link form.note.data =", "query.parameters, query.duration, query.context)) return response @book.route('/', methods=['GET', 'POST']) def index():", "%s\\nDuration: %fs\\nContext: %s\\n' % (query.statement, query.parameters, query.duration, query.context)) return response", "# ColumnDT(cast(BookMain.id, db.Integer)), ColumnDT(BookMain.id), ColumnDT(BookMain.isbn), ColumnDT(BookMain.title_short), ColumnDT(BookMain.title), ColumnDT(BookMain.catalogue), ColumnDT(BookMain.cutter), ColumnDT(BookMain.pub_year),", "{id}\" @book.route('/hackmdmeta', methods=['GET', 'POST']) def hackmd_meta(): \"\"\" :return: \"\"\" from", "= db.session.query().select_from(BookMain) # GET parameters params = request.args.to_dict() # instantiating", "-*- coding: utf-8 -*- \"\"\" example.py ~~~~~~~~~ A simple command", "params = request.args.to_dict() # instantiating a DataTable for the query", "# :param order: asc or desc # :return: renew 
query", "return render_template('book/edit_book.html', form=form) @book.route('/edit/<int:id>', methods=['GET', 'POST']) def edit_book(id): \"\"\" edit", "datatables import ColumnDT, DataTables from .. import auth from ..", "can't be fixed here: # - synopsis() cannot be prevented", "# books=BookMain.get_all_book() query = BookMain.query page = request.args.get('page', 1, type=int)", "synopsis() cannot be prevented from clobbering existing # loaded modules.", "be fixed here: # - synopsis() cannot be prevented from", "field: col name # :param order: asc or desc #", "from booktags.db.basemodels import Book from booktags.flaskapp.model.models import BookMain # ---------------------------------------------------------", "search a date formatted equal to how it is presented", "module is a relative path and # the current directory", "f\"Hello book index : {id}\" @book.route('/post/', methods=['GET', 'POST']) def post_book():", "defining columns # - explicitly cast date to string, so", "# # :param field: col name # :param order: asc", "# instantiating a DataTable for the query and table needed", ".. import auth from .. import db from .forms import", ", put book data :param id: :return: \"\"\" form =", ".. import db from .forms import EditBookForm, HackmdMeta # from", "book.title = form.title.data book.catalogue = form.catalogue.data book.cutter = form.cutter.data book.pub_year", "int(book[-1].id) + 1 print(f\"id is : {id}\") form = EditBookForm()", "= book.reprint form.removed.data = book.removed form.keepsite.data = book.keepsite return render_template('book/edit_book.html',", "book.reprint form.removed.data = book.removed form.keepsite.data = book.keepsite return render_template('book/edit_book.html', form=form)", "form.get_link.data book.note = form.note.data book.reprint = form.reprint.data book.removed = form.removed.data", ":return: \"\"\" form = EditBookForm() book = BookMain.query.filter_by(id=id).first_or_404() if form.validate_on_submit():", "print(temp) form.body.data = temp # flash('Your book data has been", "run flask apps. 
:copyright: 2019 Miller :license: BSD-3-Clause \"\"\" #", ":param id: :return: \"\"\" form = EditBookForm() book = BookMain.query.filter_by(id=id).first_or_404()", "your purpose query = db.session.query().select_from(BookMain) # GET parameters params =", ": {booksn}\") temp = get_hackmdmeta(booksn) # print(temp) form.body.data = temp", "render_template('book/index.html',books=books,pagination=pagination) # @book.route('/list/', methods=['GET', 'POST']) # def list_book(): # \"\"\"", "redirect(url_for('book.index')) form.id.data = id return render_template('book/edit_book.html', form=form) @book.route('/edit/<int:id>', methods=['GET', 'POST'])", "data(): \"\"\"Return server side data.\"\"\" # defining columns # -", "updated.', 'success') return redirect(url_for('book.index')) form.id.data = book.id form.isbn.data = book.isbn", "booktags.flaskapp.model.models import BookMain # --------------------------------------------------------- common routines @book.after_app_request def after_request(response):", "string searching the date # will search a date formatted", "from sqlalchemy.sql.expression import cast from datatables import ColumnDT, DataTables from", "'success') # return redirect(url_for('book.hackmd_meta')) return render_template('book/hackmd_meta.html',form=form) if __name__ == '__main__':", "desc # :return: renew query # \"\"\" # books =", "index(): # books=BookMain.get_all_book() query = BookMain.query page = request.args.get('page', 1,", "from flask_login import login_user, logout_user, login_required, current_user from . import", "logout_user, login_required, current_user from . import book from flask_sqlalchemy import", "print(f\"booksn is : {booksn}\") temp = get_hackmdmeta(booksn) # print(temp) form.body.data", "get_debug_queries from sqlalchemy.sql.expression import cast from datatables import ColumnDT, DataTables", "edit_book(id): \"\"\" edit , put book data :param id: :return:", "\"\"\" form = EditBookForm() book = BookMain.query.filter_by(id=id).first_or_404() if form.validate_on_submit(): #", "methods=['GET', 'POST']) def post_book(): \"\"\" post new book entry :return:", "form.title.data book.catalogue = form.catalogue.data book.cutter = form.cutter.data book.pub_year = form.pub_year.data", "= form.title.data book.catalogue = form.catalogue.data book.cutter = form.cutter.data book.pub_year =", ": {id}\") form = EditBookForm() if form.validate_on_submit(): book.id = form.id.data", "request, url_for, flash,jsonify,current_app from flask_login import login_user, logout_user, login_required, current_user", "from .forms import EditBookForm, HackmdMeta # from booktags.db.basemodels import Book", "BookMain # --------------------------------------------------------- common routines @book.after_app_request def after_request(response): for query", "index : {id}\" @book.route('/post/', methods=['GET', 'POST']) def post_book(): \"\"\" post", "def list_book(): # \"\"\" # # :param field: col name", "= HackmdMeta() if form.validate_on_submit(): booksn = str(form.booksn.data) # print(f\"booksn is", "ColumnDT(BookMain.get_link), # ColumnDT(BookMain.note), # ColumnDT(BookMain.reprint), # ColumnDT(BookMain.removed), # ColumnDT(BookMain.keepsite) ]", "book data has been updated.', 'success') return redirect(url_for('book.index')) form.id.data =", "(query.statement, query.parameters, query.duration, query.context)) return response @book.route('/', methods=['GET', 'POST']) def", "the table columns = [ # ColumnDT(cast(BookMain.id, db.Integer)), ColumnDT(BookMain.id), ColumnDT(BookMain.isbn),", 
"index: del {id}\" @book.route('/hackmdmeta', methods=['GET', 'POST']) def hackmd_meta(): \"\"\" :return:", "side data.\"\"\" # defining columns # - explicitly cast date", "be prevented from clobbering existing # loaded modules. # -", "- synopsis() cannot be prevented from clobbering existing # loaded", "on a module is a relative path and # the", "string, so string searching the date # will search a", "form.id.data book.isbn = form.isbn.data book.title_short = form.title_short.data book.title = form.title.data", "os.chdir(), an incorrect # path will be displayed. from flask", "query, columns) # returns what is needed by DataTable return", "the __file__ attribute on a module is a relative path", "= form.id.data book.isbn = form.isbn.data book.title_short = form.title_short.data book.title =", "'POST']) def data(): \"\"\"Return server side data.\"\"\" # defining columns", "form.body.data = temp # flash('Your book data has been updated.',", "flash,jsonify,current_app from flask_login import login_user, logout_user, login_required, current_user from .", "# ColumnDT(BookMain.get_link), # ColumnDT(BookMain.note), # ColumnDT(BookMain.reprint), # ColumnDT(BookMain.removed), # ColumnDT(BookMain.keepsite)", "BookMain.get_all_book() # return render_template('book/list_book.html',books=books) @book.route(\"/list\") def list_book(): \"\"\"List users with", "def hackmd_meta(): \"\"\" :return: \"\"\" from booktags.vendor.hackmd_meta import get_hackmdmeta form", "book.isbn = form.isbn.data book.title_short = form.title_short.data book.title = form.title.data book.catalogue", "form = EditBookForm() if form.validate_on_submit(): book.id = form.id.data book.isbn =", "the current directory is changed with os.chdir(), an incorrect #", "application to run flask apps. :copyright: 2019 Miller :license: BSD-3-Clause", "form.copy_info.data book.get_link = form.get_link.data book.note = form.note.data book.reprint = form.reprint.data", "book.isbn form.title_short.data = book.title_short form.title.data = book.title form.catalogue.data = book.catalogue", "with os.chdir(), an incorrect # path will be displayed. from", "date formatted equal to how it is presented # in", "ColumnDT(BookMain.note), # ColumnDT(BookMain.reprint), # ColumnDT(BookMain.removed), # ColumnDT(BookMain.keepsite) ] # defining", "del_book(id): return f\"Hello book index: del {id}\" @book.route('/hackmdmeta', methods=['GET', 'POST'])", "'POST']) def hackmd_meta(): \"\"\" :return: \"\"\" from booktags.vendor.hackmd_meta import get_hackmdmeta", "to how it is presented # in the table columns", "1.10.x.\"\"\" return render_template('book/list_book.html') @book.route('/data', methods=['GET', 'POST']) def data(): \"\"\"Return server", "form=form) @book.route('/edit/<int:id>', methods=['GET', 'POST']) def edit_book(id): \"\"\" edit , put", "~~~~~~~~~ A simple command line application to run flask apps.", "form.validate_on_submit(): booksn = str(form.booksn.data) # print(f\"booksn is : {booksn}\") temp", "from booktags.vendor.hackmd_meta import get_hackmdmeta form = HackmdMeta() if form.validate_on_submit(): booksn", "def index(): # books=BookMain.get_all_book() query = BookMain.query page = request.args.get('page',", "been updated.', 'success') # return redirect(url_for('book.hackmd_meta')) return render_template('book/hackmd_meta.html',form=form) if __name__", ". 
import book from flask_sqlalchemy import get_debug_queries from sqlalchemy.sql.expression import", "ColumnDT(BookMain.removed), # ColumnDT(BookMain.keepsite) ] # defining the initial query depending", "= book.id form.isbn.data = book.isbn form.title_short.data = book.title_short form.title.data =", "= temp # flash('Your book data has been updated.', 'success')", "'POST']) def del_book(id): return f\"Hello book index: del {id}\" @book.route('/hackmdmeta',", "form.copy_info.data = book.copy_info form.get_link.data = book.get_link form.note.data = book.note form.reprint.data", "return render_template('book/list_book.html',books=books) @book.route(\"/list\") def list_book(): \"\"\"List users with DataTables <=", "def data(): \"\"\"Return server side data.\"\"\" # defining columns #", "= form.note.data book.reprint = form.reprint.data book.removed = form.removed.data book.keepsite =", "= request.args.get('page', 1, type=int) pagination = query.order_by(cast(BookMain.id, db.Integer)).paginate( page, per_page=current_app.config['PROJECT_BOOKS_PER_PAGE'],", "after_request(response): for query in get_debug_queries(): if query.duration >= current_app.config['PROJECT_SLOW_DB_QUERY_TIME']: current_app.logger.warning(", "= form.pub_year.data book.copy_info = form.copy_info.data book.get_link = form.get_link.data book.note =", "return render_template('book/index.html',books=books,pagination=pagination) # @book.route('/list/', methods=['GET', 'POST']) # def list_book(): #", "utf-8 -*- \"\"\" example.py ~~~~~~~~~ A simple command line application", "a relative path and # the current directory is changed", "cast from datatables import ColumnDT, DataTables from .. import auth", "book data has been added.', 'success') return redirect(url_for('book.index')) form.id.data =", "put book data :param id: :return: \"\"\" form = EditBookForm()", "# in the table columns = [ # ColumnDT(cast(BookMain.id, db.Integer)),", "if form.validate_on_submit(): # book.id = form.id.data book.isbn = form.isbn.data book.title_short", "book.note = form.note.data book.reprint = form.reprint.data book.removed = form.removed.data book.keepsite", "attribute on a module is a relative path and #", "# defining the initial query depending on your purpose query", "= book.removed form.keepsite.data = book.keepsite return render_template('book/edit_book.html', form=form) @book.route('/del/<int:id>', methods=['GET',", "temp # flash('Your book data has been updated.', 'success') #", "has been added.', 'success') return redirect(url_for('book.index')) form.id.data = id return", "--------------------------------------------------------- common routines @book.after_app_request def after_request(response): for query in get_debug_queries():", "If the __file__ attribute on a module is a relative", "1 print(f\"id is : {id}\") form = EditBookForm() if form.validate_on_submit():", "page = request.args.get('page', 1, type=int) pagination = query.order_by(cast(BookMain.id, db.Integer)).paginate( page,", "= form.title_short.data book.title = form.title.data book.catalogue = form.catalogue.data book.cutter =", "ColumnDT(BookMain.catalogue), ColumnDT(BookMain.cutter), ColumnDT(BookMain.pub_year), ColumnDT(BookMain.copy_info) # ColumnDT(BookMain.get_link), # ColumnDT(BookMain.note), # ColumnDT(BookMain.reprint),", "@book.route('/list/', methods=['GET', 'POST']) # def list_book(): # \"\"\" # #", "request.args.get('page', 1, type=int) pagination = query.order_by(cast(BookMain.id, db.Integer)).paginate( page, per_page=current_app.config['PROJECT_BOOKS_PER_PAGE'], 
error_out=False)", "form = HackmdMeta() if form.validate_on_submit(): booksn = str(form.booksn.data) # print(f\"booksn", "= form.keepsite.data db.session.add(book) db.session.commit() flash('Your book data has been added.',", "BookMain.query.filter_by(id=id).first_or_404() if form.validate_on_submit(): # book.id = form.id.data book.isbn = form.isbn.data", "book = BookMain.query.all() id = int(book[-1].id) + 1 print(f\"id is", "'POST']) def edit_book(id): \"\"\" edit , put book data :param", "form.note.data book.reprint = form.reprint.data book.removed = form.removed.data book.keepsite = form.keepsite.data", "per_page=current_app.config['PROJECT_BOOKS_PER_PAGE'], error_out=False) books = pagination.items return render_template('book/index.html',books=books,pagination=pagination) # @book.route('/list/', methods=['GET',", "'success') return redirect(url_for('book.index')) form.id.data = id return render_template('book/edit_book.html', form=form) @book.route('/edit/<int:id>',", "query in get_debug_queries(): if query.duration >= current_app.config['PROJECT_SLOW_DB_QUERY_TIME']: current_app.logger.warning( 'Slow query:", "error_out=False) books = pagination.items return render_template('book/index.html',books=books,pagination=pagination) # @book.route('/list/', methods=['GET', 'POST'])", "= id return render_template('book/edit_book.html', form=form) @book.route('/edit/<int:id>', methods=['GET', 'POST']) def edit_book(id):", "apps. :copyright: 2019 Miller :license: BSD-3-Clause \"\"\" # Known bugs", "flask_sqlalchemy import get_debug_queries from sqlalchemy.sql.expression import cast from datatables import", "book entry :return: \"\"\" book = BookMain.query.all() id = int(book[-1].id)", "has been updated.', 'success') return redirect(url_for('book.index')) form.id.data = book.id form.isbn.data", "Known bugs that can't be fixed here: # - synopsis()", "current directory is changed with os.chdir(), an incorrect # path", "table needed rowTable = DataTables(params, query, columns) # returns what", "GET parameters params = request.args.to_dict() # instantiating a DataTable for", "'success') return redirect(url_for('book.index')) form.id.data = book.id form.isbn.data = book.isbn form.title_short.data", "= BookMain.query page = request.args.get('page', 1, type=int) pagination = query.order_by(cast(BookMain.id,", "current_app.logger.warning( 'Slow query: %s\\nParameters: %s\\nDuration: %fs\\nContext: %s\\n' % (query.statement, query.parameters,", "\"\"\" # books = BookMain.get_all_book() # return render_template('book/list_book.html',books=books) @book.route(\"/list\") def", "# - synopsis() cannot be prevented from clobbering existing #", "current_app.config['PROJECT_SLOW_DB_QUERY_TIME']: current_app.logger.warning( 'Slow query: %s\\nParameters: %s\\nDuration: %fs\\nContext: %s\\n' % (query.statement,", "query.context)) return response @book.route('/', methods=['GET', 'POST']) def index(): # books=BookMain.get_all_book()", "form.isbn.data = book.isbn form.title_short.data = book.title_short form.title.data = book.title form.catalogue.data", "explicitly cast date to string, so string searching the date", "query depending on your purpose query = db.session.query().select_from(BookMain) # GET", "+ 1 print(f\"id is : {id}\") form = EditBookForm() if", "= EditBookForm() if form.validate_on_submit(): book.id = form.id.data book.isbn = form.isbn.data", "modules. 
# - If the __file__ attribute on a module", "book.title_short = form.title_short.data book.title = form.title.data book.catalogue = form.catalogue.data book.cutter", "'POST']) def post_book(): \"\"\" post new book entry :return: \"\"\"", "book index : {id}\" @book.route('/post/', methods=['GET', 'POST']) def post_book(): \"\"\"", "book.cutter = form.cutter.data book.pub_year = form.pub_year.data book.copy_info = form.copy_info.data book.get_link", ":copyright: 2019 Miller :license: BSD-3-Clause \"\"\" # Known bugs that", "{booksn}\") temp = get_hackmdmeta(booksn) # print(temp) form.body.data = temp #", "<reponame>MagicSword/Booktags #!/usr/bin/env python3 # -*- coding: utf-8 -*- \"\"\" example.py", "form.isbn.data book.title_short = form.title_short.data book.title = form.title.data book.catalogue = form.catalogue.data", "f\"Hello book index: del {id}\" @book.route('/hackmdmeta', methods=['GET', 'POST']) def hackmd_meta():", "# ColumnDT(BookMain.note), # ColumnDT(BookMain.reprint), # ColumnDT(BookMain.removed), # ColumnDT(BookMain.keepsite) ] #", "#!/usr/bin/env python3 # -*- coding: utf-8 -*- \"\"\" example.py ~~~~~~~~~", "BSD-3-Clause \"\"\" # Known bugs that can't be fixed here:", "= BookMain.query.filter_by(id=id).first_or_404() if form.validate_on_submit(): # book.id = form.id.data book.isbn =", "ColumnDT(BookMain.id), ColumnDT(BookMain.isbn), ColumnDT(BookMain.title_short), ColumnDT(BookMain.title), ColumnDT(BookMain.catalogue), ColumnDT(BookMain.cutter), ColumnDT(BookMain.pub_year), ColumnDT(BookMain.copy_info) # ColumnDT(BookMain.get_link),", "= form.cutter.data book.pub_year = form.pub_year.data book.copy_info = form.copy_info.data book.get_link =", "A simple command line application to run flask apps. :copyright:", "data :param id: :return: \"\"\" form = EditBookForm() book =", "form.keepsite.data db.session.add(book) db.session.commit() flash('Your book data has been added.', 'success')", "order: asc or desc # :return: renew query # \"\"\"", "import render_template, redirect, request, url_for, flash,jsonify,current_app from flask_login import login_user,", "render_template('book/list_book.html',books=books) @book.route(\"/list\") def list_book(): \"\"\"List users with DataTables <= 1.10.x.\"\"\"", "post new book entry :return: \"\"\" book = BookMain.query.all() id", "book data has been updated.', 'success') # return redirect(url_for('book.hackmd_meta')) return", "flash('Your book data has been added.', 'success') return redirect(url_for('book.index')) form.id.data", "= book.cutter form.pub_year.data = book.pub_year form.copy_info.data = book.copy_info form.get_link.data =", "relative path and # the current directory is changed with", "query.duration >= current_app.config['PROJECT_SLOW_DB_QUERY_TIME']: current_app.logger.warning( 'Slow query: %s\\nParameters: %s\\nDuration: %fs\\nContext: %s\\n'", "form.cutter.data = book.cutter form.pub_year.data = book.pub_year form.copy_info.data = book.copy_info form.get_link.data", "book.id = form.id.data book.isbn = form.isbn.data book.title_short = form.title_short.data book.title", "= EditBookForm() book = BookMain.query.filter_by(id=id).first_or_404() if form.validate_on_submit(): # book.id =", "db.session.add(book) db.session.commit() flash('Your book data has been updated.', 'success') return", ">= current_app.config['PROJECT_SLOW_DB_QUERY_TIME']: current_app.logger.warning( 'Slow query: %s\\nParameters: %s\\nDuration: %fs\\nContext: %s\\n' %", "jsonify(rowTable.output_result()) @book.route('/get/<int:id>', methods=['GET', 'POST']) def 
get_book(): return f\"Hello book index", "from .. import auth from .. import db from .forms", "data has been added.', 'success') return redirect(url_for('book.index')) form.id.data = id", "query = BookMain.query page = request.args.get('page', 1, type=int) pagination =", "prevented from clobbering existing # loaded modules. # - If", "# def list_book(): # \"\"\" # # :param field: col", "# print(temp) form.body.data = temp # flash('Your book data has", "\"\"\" :return: \"\"\" from booktags.vendor.hackmd_meta import get_hackmdmeta form = HackmdMeta()", "ColumnDT(BookMain.isbn), ColumnDT(BookMain.title_short), ColumnDT(BookMain.title), ColumnDT(BookMain.catalogue), ColumnDT(BookMain.cutter), ColumnDT(BookMain.pub_year), ColumnDT(BookMain.copy_info) # ColumnDT(BookMain.get_link), #", "= form.get_link.data book.note = form.note.data book.reprint = form.reprint.data book.removed =", "@book.route('/data', methods=['GET', 'POST']) def data(): \"\"\"Return server side data.\"\"\" #", "query and table needed rowTable = DataTables(params, query, columns) #", "= form.copy_info.data book.get_link = form.get_link.data book.note = form.note.data book.reprint =", "] # defining the initial query depending on your purpose", "id = int(book[-1].id) + 1 print(f\"id is : {id}\") form", "been updated.', 'success') return redirect(url_for('book.index')) form.id.data = book.id form.isbn.data =", "that can't be fixed here: # - synopsis() cannot be", "sqlalchemy.sql.expression import cast from datatables import ColumnDT, DataTables from ..", "query: %s\\nParameters: %s\\nDuration: %fs\\nContext: %s\\n' % (query.statement, query.parameters, query.duration, query.context))", "ColumnDT(BookMain.title), ColumnDT(BookMain.catalogue), ColumnDT(BookMain.cutter), ColumnDT(BookMain.pub_year), ColumnDT(BookMain.copy_info) # ColumnDT(BookMain.get_link), # ColumnDT(BookMain.note), #", "directory is changed with os.chdir(), an incorrect # path will", "get_hackmdmeta(booksn) # print(temp) form.body.data = temp # flash('Your book data", "return render_template('book/edit_book.html', form=form) @book.route('/del/<int:id>', methods=['GET', 'POST']) def del_book(id): return f\"Hello", "= DataTables(params, query, columns) # returns what is needed by", "with DataTables <= 1.10.x.\"\"\" return render_template('book/list_book.html') @book.route('/data', methods=['GET', 'POST']) def", "from flask_sqlalchemy import get_debug_queries from sqlalchemy.sql.expression import cast from datatables", "def list_book(): \"\"\"List users with DataTables <= 1.10.x.\"\"\" return render_template('book/list_book.html')", "= book.note form.reprint.data = book.reprint form.removed.data = book.removed form.keepsite.data =", "# books = BookMain.get_all_book() # return render_template('book/list_book.html',books=books) @book.route(\"/list\") def list_book():", "= book.get_link form.note.data = book.note form.reprint.data = book.reprint form.removed.data =", "str(form.booksn.data) # print(f\"booksn is : {booksn}\") temp = get_hackmdmeta(booksn) #", "book index: del {id}\" @book.route('/hackmdmeta', methods=['GET', 'POST']) def hackmd_meta(): \"\"\"", "# return render_template('book/list_book.html',books=books) @book.route(\"/list\") def list_book(): \"\"\"List users with DataTables", "been added.', 'success') return redirect(url_for('book.index')) form.id.data = id return render_template('book/edit_book.html',", "needed by DataTable return jsonify(rowTable.output_result()) @book.route('/get/<int:id>', methods=['GET', 'POST']) def get_book():", "booksn = 
str(form.booksn.data) # print(f\"booksn is : {booksn}\") temp =", "= pagination.items return render_template('book/index.html',books=books,pagination=pagination) # @book.route('/list/', methods=['GET', 'POST']) # def", "form.validate_on_submit(): # book.id = form.id.data book.isbn = form.isbn.data book.title_short =", "an incorrect # path will be displayed. from flask import", "cast date to string, so string searching the date #", "columns = [ # ColumnDT(cast(BookMain.id, db.Integer)), ColumnDT(BookMain.id), ColumnDT(BookMain.isbn), ColumnDT(BookMain.title_short), ColumnDT(BookMain.title),", "temp = get_hackmdmeta(booksn) # print(temp) form.body.data = temp # flash('Your", "formatted equal to how it is presented # in the", "from . import book from flask_sqlalchemy import get_debug_queries from sqlalchemy.sql.expression", "ColumnDT(BookMain.reprint), # ColumnDT(BookMain.removed), # ColumnDT(BookMain.keepsite) ] # defining the initial", "# Known bugs that can't be fixed here: # -", "login_required, current_user from . import book from flask_sqlalchemy import get_debug_queries", "book.keepsite return render_template('book/edit_book.html', form=form) @book.route('/del/<int:id>', methods=['GET', 'POST']) def del_book(id): return", "methods=['GET', 'POST']) def del_book(id): return f\"Hello book index: del {id}\"", "def edit_book(id): \"\"\" edit , put book data :param id:", "= BookMain.get_all_book() # return render_template('book/list_book.html',books=books) @book.route(\"/list\") def list_book(): \"\"\"List users", "request.args.to_dict() # instantiating a DataTable for the query and table", "# print(f\"booksn is : {booksn}\") temp = get_hackmdmeta(booksn) # print(temp)", "form.removed.data = book.removed form.keepsite.data = book.keepsite return render_template('book/edit_book.html', form=form) @book.route('/del/<int:id>',", "\"\"\" edit , put book data :param id: :return: \"\"\"", "date # will search a date formatted equal to how", "# :param field: col name # :param order: asc or", "- If the __file__ attribute on a module is a", "form.reprint.data book.removed = form.removed.data book.keepsite = form.keepsite.data db.session.add(book) db.session.commit() flash('Your", "# will search a date formatted equal to how it", "asc or desc # :return: renew query # \"\"\" #", "redirect(url_for('book.index')) form.id.data = book.id form.isbn.data = book.isbn form.title_short.data = book.title_short", "DataTables from .. import auth from .. import db from", "changed with os.chdir(), an incorrect # path will be displayed.", "methods=['GET', 'POST']) def get_book(): return f\"Hello book index : {id}\"", "# flash('Your book data has been updated.', 'success') # return", "methods=['GET', 'POST']) # def list_book(): # \"\"\" # # :param", "# path will be displayed. 
from flask import render_template, redirect,", "will search a date formatted equal to how it is", "in get_debug_queries(): if query.duration >= current_app.config['PROJECT_SLOW_DB_QUERY_TIME']: current_app.logger.warning( 'Slow query: %s\\nParameters:", "form=form) @book.route('/del/<int:id>', methods=['GET', 'POST']) def del_book(id): return f\"Hello book index:", "{id}\") form = EditBookForm() if form.validate_on_submit(): book.id = form.id.data book.isbn", "defining the initial query depending on your purpose query =", "= book.title form.catalogue.data = book.catalogue form.cutter.data = book.cutter form.pub_year.data =", "form.id.data = id return render_template('book/edit_book.html', form=form) @book.route('/edit/<int:id>', methods=['GET', 'POST']) def", "has been updated.', 'success') # return redirect(url_for('book.hackmd_meta')) return render_template('book/hackmd_meta.html',form=form) if", "displayed. from flask import render_template, redirect, request, url_for, flash,jsonify,current_app from", "depending on your purpose query = db.session.query().select_from(BookMain) # GET parameters", "initial query depending on your purpose query = db.session.query().select_from(BookMain) #", "a date formatted equal to how it is presented #", "= book.keepsite return render_template('book/edit_book.html', form=form) @book.route('/del/<int:id>', methods=['GET', 'POST']) def del_book(id):", "get_hackmdmeta form = HackmdMeta() if form.validate_on_submit(): booksn = str(form.booksn.data) #", "print(f\"id is : {id}\") form = EditBookForm() if form.validate_on_submit(): book.id", "@book.route('/edit/<int:id>', methods=['GET', 'POST']) def edit_book(id): \"\"\" edit , put book", "id: :return: \"\"\" form = EditBookForm() book = BookMain.query.filter_by(id=id).first_or_404() if", "pagination.items return render_template('book/index.html',books=books,pagination=pagination) # @book.route('/list/', methods=['GET', 'POST']) # def list_book():", "incorrect # path will be displayed. 
from flask import render_template,", "book from flask_sqlalchemy import get_debug_queries from sqlalchemy.sql.expression import cast from", "db.session.add(book) db.session.commit() flash('Your book data has been added.', 'success') return", "book.get_link = form.get_link.data book.note = form.note.data book.reprint = form.reprint.data book.removed", "\"\"\" # # :param field: col name # :param order:", "what is needed by DataTable return jsonify(rowTable.output_result()) @book.route('/get/<int:id>', methods=['GET', 'POST'])", "example.py ~~~~~~~~~ A simple command line application to run flask", "post_book(): \"\"\" post new book entry :return: \"\"\" book =", "# defining columns # - explicitly cast date to string,", "list_book(): # \"\"\" # # :param field: col name #", "date to string, so string searching the date # will", "@book.route('/del/<int:id>', methods=['GET', 'POST']) def del_book(id): return f\"Hello book index: del", "'POST']) def get_book(): return f\"Hello book index : {id}\" @book.route('/post/',", "import get_hackmdmeta form = HackmdMeta() if form.validate_on_submit(): booksn = str(form.booksn.data)", "= form.reprint.data book.removed = form.removed.data book.keepsite = form.keepsite.data db.session.add(book) db.session.commit()", "routines @book.after_app_request def after_request(response): for query in get_debug_queries(): if query.duration", "get_book(): return f\"Hello book index : {id}\" @book.route('/post/', methods=['GET', 'POST'])", "return redirect(url_for('book.index')) form.id.data = id return render_template('book/edit_book.html', form=form) @book.route('/edit/<int:id>', methods=['GET',", "data.\"\"\" # defining columns # - explicitly cast date to", "from .. import db from .forms import EditBookForm, HackmdMeta #", "existing # loaded modules. # - If the __file__ attribute", "form.pub_year.data book.copy_info = form.copy_info.data book.get_link = form.get_link.data book.note = form.note.data", "book.catalogue = form.catalogue.data book.cutter = form.cutter.data book.pub_year = form.pub_year.data book.copy_info", "render_template('book/list_book.html') @book.route('/data', methods=['GET', 'POST']) def data(): \"\"\"Return server side data.\"\"\"", "DataTables <= 1.10.x.\"\"\" return render_template('book/list_book.html') @book.route('/data', methods=['GET', 'POST']) def data():", "EditBookForm, HackmdMeta # from booktags.db.basemodels import Book from booktags.flaskapp.model.models import", "book.title_short form.title.data = book.title form.catalogue.data = book.catalogue form.cutter.data = book.cutter", "__file__ attribute on a module is a relative path and", "import BookMain # --------------------------------------------------------- common routines @book.after_app_request def after_request(response): for", "= str(form.booksn.data) # print(f\"booksn is : {booksn}\") temp = get_hackmdmeta(booksn)", "on your purpose query = db.session.query().select_from(BookMain) # GET parameters params", "\"\"\" post new book entry :return: \"\"\" book = BookMain.query.all()", "flask apps. :copyright: 2019 Miller :license: BSD-3-Clause \"\"\" # Known", "= get_hackmdmeta(booksn) # print(temp) form.body.data = temp # flash('Your book", "def del_book(id): return f\"Hello book index: del {id}\" @book.route('/hackmdmeta', methods=['GET',", "= form.catalogue.data book.cutter = form.cutter.data book.pub_year = form.pub_year.data book.copy_info =", "coding: utf-8 -*- \"\"\" example.py ~~~~~~~~~ A simple command line", "simple command line application to run flask apps. 
:copyright: 2019", "so string searching the date # will search a date", "def post_book(): \"\"\" post new book entry :return: \"\"\" book", "db.session.commit() flash('Your book data has been added.', 'success') return redirect(url_for('book.index'))", "the query and table needed rowTable = DataTables(params, query, columns)", "clobbering existing # loaded modules. # - If the __file__", "= book.title_short form.title.data = book.title form.catalogue.data = book.catalogue form.cutter.data =", ":license: BSD-3-Clause \"\"\" # Known bugs that can't be fixed", "HackmdMeta # from booktags.db.basemodels import Book from booktags.flaskapp.model.models import BookMain", "import cast from datatables import ColumnDT, DataTables from .. import", "pagination = query.order_by(cast(BookMain.id, db.Integer)).paginate( page, per_page=current_app.config['PROJECT_BOOKS_PER_PAGE'], error_out=False) books = pagination.items", "# returns what is needed by DataTable return jsonify(rowTable.output_result()) @book.route('/get/<int:id>',", "= form.isbn.data book.title_short = form.title_short.data book.title = form.title.data book.catalogue =", "@book.route('/post/', methods=['GET', 'POST']) def post_book(): \"\"\" post new book entry", "command line application to run flask apps. :copyright: 2019 Miller", "return f\"Hello book index: del {id}\" @book.route('/hackmdmeta', methods=['GET', 'POST']) def", "url_for, flash,jsonify,current_app from flask_login import login_user, logout_user, login_required, current_user from", "# ColumnDT(BookMain.keepsite) ] # defining the initial query depending on", "for query in get_debug_queries(): if query.duration >= current_app.config['PROJECT_SLOW_DB_QUERY_TIME']: current_app.logger.warning( 'Slow", "return render_template('book/list_book.html') @book.route('/data', methods=['GET', 'POST']) def data(): \"\"\"Return server side", "import Book from booktags.flaskapp.model.models import BookMain # --------------------------------------------------------- common routines", "= form.removed.data book.keepsite = form.keepsite.data db.session.add(book) db.session.commit() flash('Your book data", "page, per_page=current_app.config['PROJECT_BOOKS_PER_PAGE'], error_out=False) books = pagination.items return render_template('book/index.html',books=books,pagination=pagination) # @book.route('/list/',", "%s\\n' % (query.statement, query.parameters, query.duration, query.context)) return response @book.route('/', methods=['GET',", "server side data.\"\"\" # defining columns # - explicitly cast", "the initial query depending on your purpose query = db.session.query().select_from(BookMain)", "db.Integer)), ColumnDT(BookMain.id), ColumnDT(BookMain.isbn), ColumnDT(BookMain.title_short), ColumnDT(BookMain.title), ColumnDT(BookMain.catalogue), ColumnDT(BookMain.cutter), ColumnDT(BookMain.pub_year), ColumnDT(BookMain.copy_info) #", "book.get_link form.note.data = book.note form.reprint.data = book.reprint form.removed.data = book.removed", "book.id form.isbn.data = book.isbn form.title_short.data = book.title_short form.title.data = book.title", "entry :return: \"\"\" book = BookMain.query.all() id = int(book[-1].id) +", "query.order_by(cast(BookMain.id, db.Integer)).paginate( page, per_page=current_app.config['PROJECT_BOOKS_PER_PAGE'], error_out=False) books = pagination.items return render_template('book/index.html',books=books,pagination=pagination)", "path and # the current directory is changed with os.chdir(),", "form.cutter.data book.pub_year = form.pub_year.data book.copy_info = 
form.copy_info.data book.get_link = form.get_link.data", "common routines @book.after_app_request def after_request(response): for query in get_debug_queries(): if", "the date # will search a date formatted equal to", "# \"\"\" # books = BookMain.get_all_book() # return render_template('book/list_book.html',books=books) @book.route(\"/list\")", "updated.', 'success') # return redirect(url_for('book.hackmd_meta')) return render_template('book/hackmd_meta.html',form=form) if __name__ ==", "import get_debug_queries from sqlalchemy.sql.expression import cast from datatables import ColumnDT,", "data has been updated.', 'success') # return redirect(url_for('book.hackmd_meta')) return render_template('book/hackmd_meta.html',form=form)", ".forms import EditBookForm, HackmdMeta # from booktags.db.basemodels import Book from", "python3 # -*- coding: utf-8 -*- \"\"\" example.py ~~~~~~~~~ A", "form.title_short.data = book.title_short form.title.data = book.title form.catalogue.data = book.catalogue form.cutter.data", "# --------------------------------------------------------- common routines @book.after_app_request def after_request(response): for query in", "HackmdMeta() if form.validate_on_submit(): booksn = str(form.booksn.data) # print(f\"booksn is :", "it is presented # in the table columns = [", "del {id}\" @book.route('/hackmdmeta', methods=['GET', 'POST']) def hackmd_meta(): \"\"\" :return: \"\"\"", ":param order: asc or desc # :return: renew query #", "login_user, logout_user, login_required, current_user from . import book from flask_sqlalchemy", "ColumnDT(cast(BookMain.id, db.Integer)), ColumnDT(BookMain.id), ColumnDT(BookMain.isbn), ColumnDT(BookMain.title_short), ColumnDT(BookMain.title), ColumnDT(BookMain.catalogue), ColumnDT(BookMain.cutter), ColumnDT(BookMain.pub_year), ColumnDT(BookMain.copy_info)", "for the query and table needed rowTable = DataTables(params, query,", "query # \"\"\" # books = BookMain.get_all_book() # return render_template('book/list_book.html',books=books)", "books = pagination.items return render_template('book/index.html',books=books,pagination=pagination) # @book.route('/list/', methods=['GET', 'POST']) #", "form.note.data = book.note form.reprint.data = book.reprint form.removed.data = book.removed form.keepsite.data", "form.get_link.data = book.get_link form.note.data = book.note form.reprint.data = book.reprint form.removed.data", "will be displayed. from flask import render_template, redirect, request, url_for,", "path will be displayed. 
from flask import render_template, redirect, request,", "book = BookMain.query.filter_by(id=id).first_or_404() if form.validate_on_submit(): # book.id = form.id.data book.isbn", "ColumnDT(BookMain.keepsite) ] # defining the initial query depending on your", "BookMain.query.all() id = int(book[-1].id) + 1 print(f\"id is : {id}\")", "list_book(): \"\"\"List users with DataTables <= 1.10.x.\"\"\" return render_template('book/list_book.html') @book.route('/data',", ":param field: col name # :param order: asc or desc", "presented # in the table columns = [ # ColumnDT(cast(BookMain.id,", "book.pub_year = form.pub_year.data book.copy_info = form.copy_info.data book.get_link = form.get_link.data book.note", "query = db.session.query().select_from(BookMain) # GET parameters params = request.args.to_dict() #", "redirect, request, url_for, flash,jsonify,current_app from flask_login import login_user, logout_user, login_required,", "book.copy_info form.get_link.data = book.get_link form.note.data = book.note form.reprint.data = book.reprint", "a DataTable for the query and table needed rowTable =", "import ColumnDT, DataTables from .. import auth from .. import", "to string, so string searching the date # will search", "current_user from . import book from flask_sqlalchemy import get_debug_queries from", "# from booktags.db.basemodels import Book from booktags.flaskapp.model.models import BookMain #", "ColumnDT(BookMain.copy_info) # ColumnDT(BookMain.get_link), # ColumnDT(BookMain.note), # ColumnDT(BookMain.reprint), # ColumnDT(BookMain.removed), #", "added.', 'success') return redirect(url_for('book.index')) form.id.data = id return render_template('book/edit_book.html', form=form)", "# - If the __file__ attribute on a module is", "# -*- coding: utf-8 -*- \"\"\" example.py ~~~~~~~~~ A simple", ":return: \"\"\" from booktags.vendor.hackmd_meta import get_hackmdmeta form = HackmdMeta() if", "methods=['GET', 'POST']) def hackmd_meta(): \"\"\" :return: \"\"\" from booktags.vendor.hackmd_meta import", "import book from flask_sqlalchemy import get_debug_queries from sqlalchemy.sql.expression import cast", "form.keepsite.data db.session.add(book) db.session.commit() flash('Your book data has been updated.', 'success')", "columns # - explicitly cast date to string, so string", "a module is a relative path and # the current", "return response @book.route('/', methods=['GET', 'POST']) def index(): # books=BookMain.get_all_book() query", "# :return: renew query # \"\"\" # books = BookMain.get_all_book()", "# - explicitly cast date to string, so string searching", "{id}\" @book.route('/post/', methods=['GET', 'POST']) def post_book(): \"\"\" post new book", "# @book.route('/list/', methods=['GET', 'POST']) # def list_book(): # \"\"\" #", "\"\"\"Return server side data.\"\"\" # defining columns # - explicitly", "db.Integer)).paginate( page, per_page=current_app.config['PROJECT_BOOKS_PER_PAGE'], error_out=False) books = pagination.items return render_template('book/index.html',books=books,pagination=pagination) #", "book.copy_info = form.copy_info.data book.get_link = form.get_link.data book.note = form.note.data book.reprint", "EditBookForm() if form.validate_on_submit(): book.id = form.id.data book.isbn = form.isbn.data book.title_short", "book.reprint = form.reprint.data book.removed = form.removed.data book.keepsite = form.keepsite.data db.session.add(book)", "booktags.vendor.hackmd_meta import get_hackmdmeta form = HackmdMeta() if form.validate_on_submit(): booksn =", "\"\"\" book = BookMain.query.all() id = 
int(book[-1].id) + 1 print(f\"id", "from clobbering existing # loaded modules. # - If the", "= form.keepsite.data db.session.add(book) db.session.commit() flash('Your book data has been updated.',", "is needed by DataTable return jsonify(rowTable.output_result()) @book.route('/get/<int:id>', methods=['GET', 'POST']) def", "@book.after_app_request def after_request(response): for query in get_debug_queries(): if query.duration >=", "<= 1.10.x.\"\"\" return render_template('book/list_book.html') @book.route('/data', methods=['GET', 'POST']) def data(): \"\"\"Return", "flask import render_template, redirect, request, url_for, flash,jsonify,current_app from flask_login import", "db.session.query().select_from(BookMain) # GET parameters params = request.args.to_dict() # instantiating a", "new book entry :return: \"\"\" book = BookMain.query.all() id =", "book.title form.catalogue.data = book.catalogue form.cutter.data = book.cutter form.pub_year.data = book.pub_year", "methods=['GET', 'POST']) def index(): # books=BookMain.get_all_book() query = BookMain.query page", "or desc # :return: renew query # \"\"\" # books", "\"\"\" # Known bugs that can't be fixed here: #", "fixed here: # - synopsis() cannot be prevented from clobbering", "to run flask apps. :copyright: 2019 Miller :license: BSD-3-Clause \"\"\"", "DataTable return jsonify(rowTable.output_result()) @book.route('/get/<int:id>', methods=['GET', 'POST']) def get_book(): return f\"Hello", "import db from .forms import EditBookForm, HackmdMeta # from booktags.db.basemodels", "= book.isbn form.title_short.data = book.title_short form.title.data = book.title form.catalogue.data =", "def after_request(response): for query in get_debug_queries(): if query.duration >= current_app.config['PROJECT_SLOW_DB_QUERY_TIME']:", "books=BookMain.get_all_book() query = BookMain.query page = request.args.get('page', 1, type=int) pagination", "name # :param order: asc or desc # :return: renew", "rowTable = DataTables(params, query, columns) # returns what is needed", "'POST']) def index(): # books=BookMain.get_all_book() query = BookMain.query page =", "return redirect(url_for('book.index')) form.id.data = book.id form.isbn.data = book.isbn form.title_short.data =", "methods=['GET', 'POST']) def edit_book(id): \"\"\" edit , put book data", "by DataTable return jsonify(rowTable.output_result()) @book.route('/get/<int:id>', methods=['GET', 'POST']) def get_book(): return", "1, type=int) pagination = query.order_by(cast(BookMain.id, db.Integer)).paginate( page, per_page=current_app.config['PROJECT_BOOKS_PER_PAGE'], error_out=False) books", "and # the current directory is changed with os.chdir(), an", "2019 Miller :license: BSD-3-Clause \"\"\" # Known bugs that can't", "if form.validate_on_submit(): book.id = form.id.data book.isbn = form.isbn.data book.title_short =", "flask_login import login_user, logout_user, login_required, current_user from . 
import book", "from booktags.flaskapp.model.models import BookMain # --------------------------------------------------------- common routines @book.after_app_request def", "'Slow query: %s\\nParameters: %s\\nDuration: %fs\\nContext: %s\\n' % (query.statement, query.parameters, query.duration,", "ColumnDT(BookMain.cutter), ColumnDT(BookMain.pub_year), ColumnDT(BookMain.copy_info) # ColumnDT(BookMain.get_link), # ColumnDT(BookMain.note), # ColumnDT(BookMain.reprint), #", "parameters params = request.args.to_dict() # instantiating a DataTable for the", "render_template('book/edit_book.html', form=form) @book.route('/del/<int:id>', methods=['GET', 'POST']) def del_book(id): return f\"Hello book", "hackmd_meta(): \"\"\" :return: \"\"\" from booktags.vendor.hackmd_meta import get_hackmdmeta form =", "booktags.db.basemodels import Book from booktags.flaskapp.model.models import BookMain # --------------------------------------------------------- common", "# the current directory is changed with os.chdir(), an incorrect", "render_template('book/edit_book.html', form=form) @book.route('/edit/<int:id>', methods=['GET', 'POST']) def edit_book(id): \"\"\" edit ,", "@book.route('/hackmdmeta', methods=['GET', 'POST']) def hackmd_meta(): \"\"\" :return: \"\"\" from booktags.vendor.hackmd_meta", "= BookMain.query.all() id = int(book[-1].id) + 1 print(f\"id is :", "return f\"Hello book index : {id}\" @book.route('/post/', methods=['GET', 'POST']) def", "@book.route(\"/list\") def list_book(): \"\"\"List users with DataTables <= 1.10.x.\"\"\" return", "how it is presented # in the table columns =", "is changed with os.chdir(), an incorrect # path will be", "if query.duration >= current_app.config['PROJECT_SLOW_DB_QUERY_TIME']: current_app.logger.warning( 'Slow query: %s\\nParameters: %s\\nDuration: %fs\\nContext:", "ColumnDT(BookMain.pub_year), ColumnDT(BookMain.copy_info) # ColumnDT(BookMain.get_link), # ColumnDT(BookMain.note), # ColumnDT(BookMain.reprint), # ColumnDT(BookMain.removed),", "book.catalogue form.cutter.data = book.cutter form.pub_year.data = book.pub_year form.copy_info.data = book.copy_info", "# loaded modules. 
# - If the __file__ attribute on", "is presented # in the table columns = [ #", "edit , put book data :param id: :return: \"\"\" form", "books = BookMain.get_all_book() # return render_template('book/list_book.html',books=books) @book.route(\"/list\") def list_book(): \"\"\"List", "is : {booksn}\") temp = get_hackmdmeta(booksn) # print(temp) form.body.data =", "searching the date # will search a date formatted equal", "@book.route('/', methods=['GET', 'POST']) def index(): # books=BookMain.get_all_book() query = BookMain.query", "get_debug_queries(): if query.duration >= current_app.config['PROJECT_SLOW_DB_QUERY_TIME']: current_app.logger.warning( 'Slow query: %s\\nParameters: %s\\nDuration:", "book.keepsite = form.keepsite.data db.session.add(book) db.session.commit() flash('Your book data has been", "table columns = [ # ColumnDT(cast(BookMain.id, db.Integer)), ColumnDT(BookMain.id), ColumnDT(BookMain.isbn), ColumnDT(BookMain.title_short),", "= [ # ColumnDT(cast(BookMain.id, db.Integer)), ColumnDT(BookMain.id), ColumnDT(BookMain.isbn), ColumnDT(BookMain.title_short), ColumnDT(BookMain.title), ColumnDT(BookMain.catalogue),", "book.cutter form.pub_year.data = book.pub_year form.copy_info.data = book.copy_info form.get_link.data = book.get_link", "EditBookForm() book = BookMain.query.filter_by(id=id).first_or_404() if form.validate_on_submit(): # book.id = form.id.data", "is : {id}\") form = EditBookForm() if form.validate_on_submit(): book.id =", "\"\"\"List users with DataTables <= 1.10.x.\"\"\" return render_template('book/list_book.html') @book.route('/data', methods=['GET',", "bugs that can't be fixed here: # - synopsis() cannot", "= book.catalogue form.cutter.data = book.cutter form.pub_year.data = book.pub_year form.copy_info.data =", "[ # ColumnDT(cast(BookMain.id, db.Integer)), ColumnDT(BookMain.id), ColumnDT(BookMain.isbn), ColumnDT(BookMain.title_short), ColumnDT(BookMain.title), ColumnDT(BookMain.catalogue), ColumnDT(BookMain.cutter),", ":return: \"\"\" book = BookMain.query.all() id = int(book[-1].id) + 1", "book.removed form.keepsite.data = book.keepsite return render_template('book/edit_book.html', form=form) @book.route('/del/<int:id>', methods=['GET', 'POST'])", "purpose query = db.session.query().select_from(BookMain) # GET parameters params = request.args.to_dict()", "\"\"\" example.py ~~~~~~~~~ A simple command line application to run", "flash('Your book data has been updated.', 'success') # return redirect(url_for('book.hackmd_meta'))", "line application to run flask apps. 
:copyright: 2019 Miller :license:", "DataTables(params, query, columns) # returns what is needed by DataTable", "id return render_template('book/edit_book.html', form=form) @book.route('/edit/<int:id>', methods=['GET', 'POST']) def edit_book(id): \"\"\"", "form.id.data = book.id form.isbn.data = book.isbn form.title_short.data = book.title_short form.title.data", "render_template, redirect, request, url_for, flash,jsonify,current_app from flask_login import login_user, logout_user,", "and table needed rowTable = DataTables(params, query, columns) # returns", "flash('Your book data has been updated.', 'success') return redirect(url_for('book.index')) form.id.data", "'POST']) # def list_book(): # \"\"\" # # :param field:", "def get_book(): return f\"Hello book index : {id}\" @book.route('/post/', methods=['GET',", "if form.validate_on_submit(): booksn = str(form.booksn.data) # print(f\"booksn is : {booksn}\")", "needed rowTable = DataTables(params, query, columns) # returns what is", "methods=['GET', 'POST']) def data(): \"\"\"Return server side data.\"\"\" # defining", "ColumnDT, DataTables from .. import auth from .. import db", "Book from booktags.flaskapp.model.models import BookMain # --------------------------------------------------------- common routines @book.after_app_request", "returns what is needed by DataTable return jsonify(rowTable.output_result()) @book.route('/get/<int:id>', methods=['GET',", "# return redirect(url_for('book.hackmd_meta')) return render_template('book/hackmd_meta.html',form=form) if __name__ == '__main__': pass", "= request.args.to_dict() # instantiating a DataTable for the query and", "form = EditBookForm() book = BookMain.query.filter_by(id=id).first_or_404() if form.validate_on_submit(): # book.id", "BookMain.query page = request.args.get('page', 1, type=int) pagination = query.order_by(cast(BookMain.id, db.Integer)).paginate(", "cannot be prevented from clobbering existing # loaded modules. #", "@book.route('/get/<int:id>', methods=['GET', 'POST']) def get_book(): return f\"Hello book index :", "here: # - synopsis() cannot be prevented from clobbering existing", "# book.id = form.id.data book.isbn = form.isbn.data book.title_short = form.title_short.data", "DataTable for the query and table needed rowTable = DataTables(params,", "book.note form.reprint.data = book.reprint form.removed.data = book.removed form.keepsite.data = book.keepsite", "renew query # \"\"\" # books = BookMain.get_all_book() # return", "form.reprint.data = book.reprint form.removed.data = book.removed form.keepsite.data = book.keepsite return", "columns) # returns what is needed by DataTable return jsonify(rowTable.output_result())", "loaded modules. # - If the __file__ attribute on a", "query.duration, query.context)) return response @book.route('/', methods=['GET', 'POST']) def index(): #", "Miller :license: BSD-3-Clause \"\"\" # Known bugs that can't be" ]
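The slow-query logging and the pagination above depend on a few application config keys. A minimal configuration sketch follows; the key names come from the view code, while the values shown are illustrative assumptions, and the project's real config module is not part of this file.

# Hedged configuration sketch; only the key names are taken from the views above.
class Config:
    SQLALCHEMY_RECORD_QUERIES = True   # required for flask_sqlalchemy.get_debug_queries()
    PROJECT_SLOW_DB_QUERY_TIME = 0.5   # seconds; slower queries are logged by after_request()
    PROJECT_BOOKS_PER_PAGE = 20        # page size used by index() pagination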
from enum import Enum


class content_type(Enum):
    # https://www.iana.org/assignments/media-types/media-types.xhtml
    css = 'text/css'
    gif = 'image/gif'
    htm = 'text/html'
    html = 'text/html'
    ico = 'image/bmp'
    jpg = 'image/jpeg'
    jpeg = 'image/jpeg'
    js = 'application/javascript'
    png = 'image/png'
    txt = 'text/plain; charset=us-ascii'
    json = 'application/json'
    svg = 'image/svg+xml'
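A small usage sketch, not part of the original file: it assumes the content_type enum above is in scope and maps a filename suffix to its MIME type, falling back to application/octet-stream for unknown suffixes. The guess_mime helper name is hypothetical.

from pathlib import Path

def guess_mime(filename: str) -> str:
    # Look up the member whose name matches the file suffix (e.g. 'png' -> 'image/png').
    suffix = Path(filename).suffix.lstrip('.').lower()
    try:
        return content_type[suffix].value
    except KeyError:
        return 'application/octet-stream'

print(guess_mime('cover.png'))   # image/png
print(guess_mime('notes.txt'))   # text/plain; charset=us-ascii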